Compare commits
9 Commits
v3.0.0.RC2
...
kafka-0.9-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ea87792c76 | ||
|
|
c3c8935014 | ||
|
|
92da0fe3e9 | ||
|
|
6a60c76dd9 | ||
|
|
b3c26cf93f | ||
|
|
3b0bf53896 | ||
|
|
d9670e040b | ||
|
|
fd28764e39 | ||
|
|
079592363d |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -18,7 +18,6 @@ _site/
|
||||
*.ipr
|
||||
*.iws
|
||||
.idea/*
|
||||
*/.idea
|
||||
.factorypath
|
||||
dump.rdb
|
||||
.apt_generated
|
||||
|
||||
110
.mvn/wrapper/MavenWrapperDownloader.java
vendored
110
.mvn/wrapper/MavenWrapperDownloader.java
vendored
@@ -1,110 +0,0 @@
|
||||
/*
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
*/
|
||||
|
||||
import java.net.*;
|
||||
import java.io.*;
|
||||
import java.nio.channels.*;
|
||||
import java.util.Properties;
|
||||
|
||||
public class MavenWrapperDownloader {
|
||||
|
||||
/**
|
||||
* Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
|
||||
*/
|
||||
private static final String DEFAULT_DOWNLOAD_URL =
|
||||
"https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar";
|
||||
|
||||
/**
|
||||
* Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
|
||||
* use instead of the default one.
|
||||
*/
|
||||
private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
|
||||
".mvn/wrapper/maven-wrapper.properties";
|
||||
|
||||
/**
|
||||
* Path where the maven-wrapper.jar will be saved to.
|
||||
*/
|
||||
private static final String MAVEN_WRAPPER_JAR_PATH =
|
||||
".mvn/wrapper/maven-wrapper.jar";
|
||||
|
||||
/**
|
||||
* Name of the property which should be used to override the default download url for the wrapper.
|
||||
*/
|
||||
private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
|
||||
|
||||
public static void main(String args[]) {
|
||||
System.out.println("- Downloader started");
|
||||
File baseDirectory = new File(args[0]);
|
||||
System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
|
||||
|
||||
// If the maven-wrapper.properties exists, read it and check if it contains a custom
|
||||
// wrapperUrl parameter.
|
||||
File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
|
||||
String url = DEFAULT_DOWNLOAD_URL;
|
||||
if(mavenWrapperPropertyFile.exists()) {
|
||||
FileInputStream mavenWrapperPropertyFileInputStream = null;
|
||||
try {
|
||||
mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
|
||||
Properties mavenWrapperProperties = new Properties();
|
||||
mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
|
||||
url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
|
||||
} catch (IOException e) {
|
||||
System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
|
||||
} finally {
|
||||
try {
|
||||
if(mavenWrapperPropertyFileInputStream != null) {
|
||||
mavenWrapperPropertyFileInputStream.close();
|
||||
}
|
||||
} catch (IOException e) {
|
||||
// Ignore ...
|
||||
}
|
||||
}
|
||||
}
|
||||
System.out.println("- Downloading from: : " + url);
|
||||
|
||||
File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
|
||||
if(!outputFile.getParentFile().exists()) {
|
||||
if(!outputFile.getParentFile().mkdirs()) {
|
||||
System.out.println(
|
||||
"- ERROR creating output direcrory '" + outputFile.getParentFile().getAbsolutePath() + "'");
|
||||
}
|
||||
}
|
||||
System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
|
||||
try {
|
||||
downloadFileFromURL(url, outputFile);
|
||||
System.out.println("Done");
|
||||
System.exit(0);
|
||||
} catch (Throwable e) {
|
||||
System.out.println("- Error downloading");
|
||||
e.printStackTrace();
|
||||
System.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
private static void downloadFileFromURL(String urlString, File destination) throws Exception {
|
||||
URL website = new URL(urlString);
|
||||
ReadableByteChannel rbc;
|
||||
rbc = Channels.newChannel(website.openStream());
|
||||
FileOutputStream fos = new FileOutputStream(destination);
|
||||
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
|
||||
fos.close();
|
||||
rbc.close();
|
||||
}
|
||||
|
||||
}
|
||||
BIN
.mvn/wrapper/maven-wrapper.jar
vendored
Executable file → Normal file
BIN
.mvn/wrapper/maven-wrapper.jar
vendored
Executable file → Normal file
Binary file not shown.
2
.mvn/wrapper/maven-wrapper.properties
vendored
Executable file → Normal file
2
.mvn/wrapper/maven-wrapper.properties
vendored
Executable file → Normal file
@@ -1 +1 @@
|
||||
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.5.4/apache-maven-3.5.4-bin.zip
|
||||
distributionUrl=https://repo1.maven.org/maven2/org/apache/maven/apache-maven/3.3.3/apache-maven-3.3.3-bin.zip
|
||||
836
README.adoc
836
README.adoc
@@ -1,835 +1,3 @@
|
||||
////
|
||||
DO NOT EDIT THIS FILE. IT WAS GENERATED.
|
||||
Manual changes to this file will be lost when it is generated again.
|
||||
Edit the files in the src/main/asciidoc/ directory instead.
|
||||
////
|
||||
# spring-cloud-stream-binder-kafka
|
||||
|
||||
:jdkversion: 1.8
|
||||
:github-tag: master
|
||||
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
|
||||
|
||||
:github-raw: https://raw.githubusercontent.com/{github-repo}/{github-tag}
|
||||
:github-code: https://github.com/{github-repo}/tree/{github-tag}
|
||||
|
||||
image::https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka.svg?style=svg["CircleCI", link="https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
|
||||
image::https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka/branch/{github-tag}/graph/badge.svg["codecov", link="https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
|
||||
image::https://badges.gitter.im/spring-cloud/spring-cloud-stream-binder-kafka.svg[Gitter, link="https://gitter.im/spring-cloud/spring-cloud-stream-binder-kafka?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge"]
|
||||
|
||||
// ======================================================================================
|
||||
|
||||
//= Overview
|
||||
[partintro]
|
||||
--
|
||||
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
|
||||
It contains information about its design, usage, and configuration options, as well as information on how the Stream Cloud Stream concepts map onto Apache Kafka specific constructs.
|
||||
In addition, this guide explains the Kafka Streams binding capabilities of Spring Cloud Stream.
|
||||
--
|
||||
|
||||
== Apache Kafka Binder
|
||||
|
||||
=== Usage
|
||||
|
||||
To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
=== Overview
|
||||
|
||||
The following image shows a simplified diagram of how the Apache Kafka binder operates:
|
||||
|
||||
.Kafka Binder
|
||||
image::{github-raw}/docs/src/main/asciidoc/images/kafka-binder.png[width=300,scaledwidth="50%"]
|
||||
|
||||
The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
|
||||
The consumer group maps directly to the same Apache Kafka concept.
|
||||
Partitioning also maps directly to Apache Kafka partitions as well.
|
||||
|
||||
The binder currently uses the Apache Kafka `kafka-clients` version `2.3.1`.
|
||||
This client can communicate with older brokers (see the Kafka documentation), but certain features may not be available.
|
||||
For example, with versions earlier than 0.11.x.x, native headers are not supported.
|
||||
Also, 0.11.x.x does not support the `autoAddPartitions` property.
|
||||
|
||||
=== Configuration Options
|
||||
|
||||
This section contains the configuration options used by the Apache Kafka binder.
|
||||
|
||||
For common configuration options and properties pertaining to binder, see the <<binding-properties,core documentation>>.
|
||||
|
||||
==== Kafka Binder Properties
|
||||
|
||||
spring.cloud.stream.kafka.binder.brokers::
|
||||
A list of brokers to which the Kafka binder connects.
|
||||
+
|
||||
Default: `localhost`.
|
||||
spring.cloud.stream.kafka.binder.defaultBrokerPort::
|
||||
`brokers` allows hosts specified with or without port information (for example, `host1,host2:port2`).
|
||||
This sets the default port when no port is configured in the broker list.
|
||||
+
|
||||
Default: `9092`.
|
||||
spring.cloud.stream.kafka.binder.configuration::
|
||||
Key/Value map of client properties (both producers and consumer) passed to all clients created by the binder.
|
||||
Due to the fact that these properties are used by both producers and consumers, usage should be restricted to common properties -- for example, security settings.
|
||||
Unknown Kafka producer or consumer properties provided through this configuration are filtered out and not allowed to propagate.
|
||||
Properties here supersede any properties set in boot.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.consumerProperties::
|
||||
Key/Value map of arbitrary Kafka client consumer properties.
|
||||
In addition to support known Kafka consumer properties, unknown consumer properties are allowed here as well.
|
||||
Properties here supersede any properties set in boot and in the `configuration` property above.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.headers::
|
||||
The list of custom headers that are transported by the binder.
|
||||
Only required when communicating with older applications (<= 1.3.x) with a `kafka-clients` version < 0.11.0.0. Newer versions support headers natively.
|
||||
+
|
||||
Default: empty.
|
||||
spring.cloud.stream.kafka.binder.healthTimeout::
|
||||
The time to wait to get partition information, in seconds.
|
||||
Health reports as down if this timer expires.
|
||||
+
|
||||
Default: 10.
|
||||
spring.cloud.stream.kafka.binder.requiredAcks::
|
||||
The number of required acks on the broker.
|
||||
See the Kafka documentation for the producer `acks` property.
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.minPartitionCount::
|
||||
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
|
||||
The global minimum number of partitions that the binder configures on topics on which it produces or consumes data.
|
||||
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount * concurrency` settings of the producer (if either is larger).
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.producerProperties::
|
||||
Key/Value map of arbitrary Kafka client producer properties.
|
||||
In addition to support known Kafka producer properties, unknown producer properties are allowed here as well.
|
||||
Properties here supersede any properties set in boot and in the `configuration` property above.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.replicationFactor::
|
||||
The replication factor of auto-created topics if `autoCreateTopics` is active.
|
||||
Can be overridden on each binding.
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.autoCreateTopics::
|
||||
If set to `true`, the binder creates new topics automatically.
|
||||
If set to `false`, the binder relies on the topics being already configured.
|
||||
In the latter case, if the topics do not exist, the binder fails to start.
|
||||
+
|
||||
NOTE: This setting is independent of the `auto.create.topics.enable` setting of the broker and does not influence it.
|
||||
If the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
|
||||
+
|
||||
Default: `true`.
|
||||
spring.cloud.stream.kafka.binder.autoAddPartitions::
|
||||
If set to `true`, the binder creates new partitions if required.
|
||||
If set to `false`, the binder relies on the partition size of the topic being already configured.
|
||||
If the partition count of the target topic is smaller than the expected value, the binder fails to start.
|
||||
+
|
||||
Default: `false`.
|
||||
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
|
||||
Enables transactions in the binder. See `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
|
||||
When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
|
||||
+
|
||||
Default `null` (no transactions)
|
||||
spring.cloud.stream.kafka.binder.transaction.producer.*::
|
||||
Global producer properties for producers in a transactional binder.
|
||||
See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` and <<kafka-producer-properties>> and the general producer properties supported by all binders.
|
||||
+
|
||||
Default: See individual producer properties.
|
||||
|
||||
spring.cloud.stream.kafka.binder.headerMapperBeanName::
|
||||
The bean name of a `KafkaHeaderMapper` used for mapping `spring-messaging` headers to and from Kafka headers.
|
||||
Use this, for example, if you wish to customize the trusted packages in a `BinderHeaderMapper` bean that uses JSON deserialization for the headers.
|
||||
If this custom `BinderHeaderMapper` bean is not made available to the binder using this property, then the binder will look for a header mapper bean with the name `kafkaBinderHeaderMapper` that is of type `BinderHeaderMapper` before falling back to a default `BinderHeaderMapper` created by the binder.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
[[kafka-consumer-properties]]
|
||||
==== Kafka Consumer Properties
|
||||
|
||||
NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.consumer.<property>=<value>`.
|
||||
|
||||
|
||||
The following properties are available for Kafka consumers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.
|
||||
|
||||
admin.configuration::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replicas-assignment::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replication-factor::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.
|
||||
|
||||
autoRebalanceEnabled::
|
||||
When `true`, topic partitions is automatically rebalanced between the members of a consumer group.
|
||||
When `false`, each consumer is assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
|
||||
This requires both the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
|
||||
The value of the `spring.cloud.stream.instanceCount` property must typically be greater than 1 in this case.
|
||||
+
|
||||
Default: `true`.
|
||||
ackEachRecord::
|
||||
When `autoCommitOffset` is `true`, this setting dictates whether to commit the offset after each record is processed.
|
||||
By default, offsets are committed after all records in the batch of records returned by `consumer.poll()` have been processed.
|
||||
The number of records returned by a poll can be controlled with the `max.poll.records` Kafka property, which is set through the consumer `configuration` property.
|
||||
Setting this to `true` may cause a degradation in performance, but doing so reduces the likelihood of redelivered records when a failure occurs.
|
||||
Also, see the binder `requiredAcks` property, which also affects the performance of committing offsets.
|
||||
+
|
||||
Default: `false`.
|
||||
autoCommitOffset::
|
||||
Whether to autocommit offsets when a message has been processed.
|
||||
If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` header is present in the inbound message.
|
||||
Applications may use this header for acknowledging messages.
|
||||
See the examples section for details.
|
||||
When this property is set to `false`, Kafka binder sets the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL` and the application is responsible for acknowledging records.
|
||||
Also see `ackEachRecord`.
|
||||
+
|
||||
Default: `true`.
|
||||
autoCommitOnError::
|
||||
Effective only if `autoCommitOffset` is set to `true`.
|
||||
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only for successful messages. It allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
|
||||
If set to `true`, it always auto-commits (if auto-commit is enabled).
|
||||
If not set (the default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
|
||||
+
|
||||
Default: not set.
|
||||
resetOffsets::
|
||||
Whether to reset offsets on the consumer to the value provided by startOffset.
|
||||
Must be false if a `KafkaRebalanceListener` is provided; see <<rebalance-listener>>.
|
||||
+
|
||||
Default: `false`.
|
||||
startOffset::
|
||||
The starting offset for new groups.
|
||||
Allowed values: `earliest` and `latest`.
|
||||
If the consumer group is set explicitly for the consumer 'binding' (through `spring.cloud.stream.bindings.<channelName>.group`), 'startOffset' is set to `earliest`. Otherwise, it is set to `latest` for the `anonymous` consumer group.
|
||||
Also see `resetOffsets` (earlier in this list).
|
||||
+
|
||||
Default: null (equivalent to `earliest`).
|
||||
enableDlq::
|
||||
When set to true, it enables DLQ behavior for the consumer.
|
||||
By default, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`.
|
||||
The DLQ topic name can be configurable by setting the `dlqName` property.
|
||||
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
|
||||
See <<kafka-dlq-processing>> processing for more information.
|
||||
Starting with version 2.0, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message`, and `x-exception-stacktrace` as `byte[]`.
|
||||
By default, a failed record is sent to the same partition number in the DLQ topic as the original record.
|
||||
See <<dlq-partition-selection>> for how to change that behavior.
|
||||
**Not allowed when `destinationIsPattern` is `true`.**
|
||||
+
|
||||
Default: `false`.
|
||||
dlqPartitions::
|
||||
When `enableDlq` is true, and this property is not set, a dead letter topic with the same number of partitions as the primary topic(s) is created.
|
||||
Usually, dead-letter records are sent to the same partition in the dead-letter topic as the original record.
|
||||
This behavior can be changed; see <<dlq-partition-selection>>.
|
||||
If this property is set to `1` and there is no `DqlPartitionFunction` bean, all dead-letter records will be written to partition `0`.
|
||||
If this property is greater than `1`, you **MUST** provide a `DlqPartitionFunction` bean.
|
||||
Note that the actual partition count is affected by the binder's `minPartitionCount` property.
|
||||
+
|
||||
Default: `none`
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka consumer properties.
|
||||
In addition to having Kafka consumer properties, other configuration properties can be passed here.
|
||||
For example some properties needed by the application such as `spring.cloud.stream.kafka.bindings.input.consumer.configuration.foo=bar`.
|
||||
+
|
||||
Default: Empty map.
|
||||
dlqName::
|
||||
The name of the DLQ topic to receive the error messages.
|
||||
+
|
||||
Default: null (If not specified, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`).
|
||||
dlqProducerProperties::
|
||||
Using this, DLQ-specific producer properties can be set.
|
||||
All the properties available through kafka producer properties can be set through this property.
|
||||
When native decoding is enabled on the consumer (i.e., useNativeDecoding: true) , the application must provide corresponding key/value serializers for DLQ.
|
||||
This must be provided in the form of `dlqProducerProperties.configuration.key.serializer` and `dlqProducerProperties.configuration.value.serializer`.
|
||||
+
|
||||
Default: Default Kafka producer properties.
|
||||
standardHeaders::
|
||||
Indicates which standard headers are populated by the inbound channel adapter.
|
||||
Allowed values: `none`, `id`, `timestamp`, or `both`.
|
||||
Useful if using native deserialization and the first component to receive a message needs an `id` (such as an aggregator that is configured to use a JDBC message store).
|
||||
+
|
||||
Default: `none`
|
||||
converterBeanName::
|
||||
The name of a bean that implements `RecordMessageConverter`. Used in the inbound channel adapter to replace the default `MessagingMessageConverter`.
|
||||
+
|
||||
Default: `null`
|
||||
idleEventInterval::
|
||||
The interval, in milliseconds, between events indicating that no messages have recently been received.
|
||||
Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events.
|
||||
See <<pause-resume>> for a usage example.
|
||||
+
|
||||
Default: `30000`
|
||||
destinationIsPattern::
|
||||
When true, the destination is treated as a regular expression `Pattern` used to match topic names by the broker.
|
||||
When true, topics are not provisioned, and `enableDlq` is not allowed, because the binder does not know the topic names during the provisioning phase.
|
||||
Note, the time taken to detect new topics that match the pattern is controlled by the consumer property `metadata.max.age.ms`, which (at the time of writing) defaults to 300,000ms (5 minutes).
|
||||
This can be configured using the `configuration` property above.
|
||||
+
|
||||
Default: `false`
|
||||
topic.properties::
|
||||
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.input.consumer.topic.properties.message.format.version=0.9.0.0`
|
||||
+
|
||||
Default: none.
|
||||
topic.replicas-assignment::
|
||||
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
|
||||
Used when provisioning new topics.
|
||||
See the `NewTopic` Javadocs in the `kafka-clients` jar.
|
||||
+
|
||||
Default: none.
|
||||
topic.replication-factor::
|
||||
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of 1 is used).
|
||||
pollTimeout::
|
||||
Timeout used for polling in pollable consumers.
|
||||
+
|
||||
Default: 5 seconds.
|
||||
|
||||
==== Consuming Batches
|
||||
|
||||
Starting with version 3.0, when `spring.cloud.stream.binding.<name>.consumer.batch-mode` is set to `true`, all of the records received by polling the Kafka `Consumer` will be presented as a `List<?>` to the listener method.
|
||||
Otherwise, the method will be called with one record at a time.
|
||||
The size of the batch is controlled by Kafka consumer properties `max.poll.records`, `min.fetch.bytes`, `fetch.max.wait.ms`; refer to the Kafka documentation for more information.
|
||||
|
||||
IMPORTANT: Retry within the binder is not supported when using batch mode, so `maxAttempts` will be overridden to 1.
|
||||
You can configure a `SeekToCurrentBatchErrorHandler` (using a `ListenerContainerCustomizer`) to achieve similar functionality to retry in the binder.
|
||||
You can also use a manual `AckMode` and call `Ackowledgment.nack(index, sleep)` to commit the offsets for a partial batch and have the remaining records redelivered.
|
||||
Refer to the https://docs.spring.io/spring-kafka/docs/2.3.0.BUILD-SNAPSHOT/reference/html/#committing-offsets[Spring for Apache Kafka documentation] for more information about these techniques.
|
||||
|
||||
[[kafka-producer-properties]]
|
||||
==== Kafka Producer Properties
|
||||
|
||||
NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.producer.<property>=<value>`.
|
||||
|
||||
|
||||
The following properties are available for Kafka producers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.
|
||||
|
||||
admin.configuration::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replicas-assignment::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replication-factor::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.
|
||||
|
||||
bufferSize::
|
||||
Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
|
||||
+
|
||||
Default: `16384`.
|
||||
sync::
|
||||
Whether the producer is synchronous.
|
||||
+
|
||||
Default: `false`.
|
||||
sendTimeoutExpression::
|
||||
A SpEL expression evaluated against the outgoing message used to evaluate the time to wait for ack when synchronous publish is enabled -- for example, `headers['mySendTimeout']`.
|
||||
The value of the timeout is in milliseconds.
|
||||
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
|
||||
Now, the expression is evaluated before the payload is converted.
|
||||
+
|
||||
Default: `none`.
|
||||
batchTimeout::
|
||||
How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
|
||||
(Normally, the producer does not wait at all and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
|
||||
+
|
||||
Default: `0`.
|
||||
messageKeyExpression::
|
||||
A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message -- for example, `headers['myKey']`.
|
||||
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
|
||||
Now, the expression is evaluated before the payload is converted.
|
||||
+
|
||||
Default: `none`.
|
||||
headerPatterns::
|
||||
A comma-delimited list of simple patterns to match Spring messaging headers to be mapped to the Kafka `Headers` in the `ProducerRecord`.
|
||||
Patterns can begin or end with the wildcard character (asterisk).
|
||||
Patterns can be negated by prefixing with `!`.
|
||||
Matching stops after the first match (positive or negative).
|
||||
For example `!ask,as*` will pass `ash` but not `ask`.
|
||||
`id` and `timestamp` are never mapped.
|
||||
+
|
||||
Default: `*` (all headers - except the `id` and `timestamp`)
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka producer properties.
|
||||
+
|
||||
Default: Empty map.
|
||||
topic.properties::
|
||||
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.output.producer.topic.properties.message.format.version=0.9.0.0`
|
||||
+
|
||||
topic.replicas-assignment::
|
||||
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
|
||||
Used when provisioning new topics.
|
||||
See the `NewTopic` Javadocs in the `kafka-clients` jar.
|
||||
+
|
||||
Default: none.
|
||||
topic.replication-factor::
|
||||
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of 1 is used).
|
||||
useTopicHeader::
|
||||
Set to `true` to override the default binding destination (topic name) with the value of the `KafkaHeaders.TOPIC` message header in the outbound message.
|
||||
If the header is not present, the default binding destination is used.
|
||||
Default: `false`.
|
||||
+
|
||||
recordMetadataChannel::
|
||||
The bean name of a `MessageChannel` to which successful send results should be sent; the bean must exist in the application context.
|
||||
The message sent to the channel is the sent message (after conversion, if any) with an additional header `KafkaHeaders.RECORD_METADATA`.
|
||||
The header contains a `RecordMetadata` object provided by the Kafka client; it includes the partition and offset where the record was written in the topic.
|
||||
|
||||
`ResultMetadata meta = sendResultMsg.getHeaders().get(KafkaHeaders.RECORD_METADATA, RecordMetadata.class)`
|
||||
|
||||
Failed sends go the producer error channel (if configured); see <<kafka-error-channels>>.
|
||||
Default: null
|
||||
+
|
||||
|
||||
NOTE: The Kafka binder uses the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value being used).
|
||||
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value is used.
|
||||
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), the binder fails to start.
|
||||
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions are added.
|
||||
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` or `partitionCount`), the existing partition count is used.
|
||||
|
||||
compression::
|
||||
Set the `compression.type` producer property.
|
||||
Supported values are `none`, `gzip`, `snappy` and `lz4`.
|
||||
If you override the `kafka-clients` jar to 2.1.0 (or later), as discussed in the https://docs.spring.io/spring-kafka/docs/2.2.x/reference/html/deps-for-21x.html[Spring for Apache Kafka documentation], and wish to use `zstd` compression, use `spring.cloud.stream.kafka.bindings.<binding-name>.producer.configuration.compression.type=zstd`.
|
||||
+
|
||||
Default: `none`.
|
||||
|
||||
==== Usage examples
|
||||
|
||||
In this section, we show the use of the preceding properties for specific scenarios.
|
||||
|
||||
===== Example: Setting `autoCommitOffset` to `false` and Relying on Manual Acking
|
||||
|
||||
This example illustrates how one may manually acknowledge offsets in a consumer application.
|
||||
|
||||
This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` be set to `false`.
|
||||
Use the corresponding input channel name for your example.
|
||||
|
||||
[source]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class ManuallyAcknowledgingConsumer {
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(ManuallyAcknowledgingConsumer.class, args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void process(Message<?> message) {
|
||||
Acknowledgment acknowledgment = message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
|
||||
if (acknowledgment != null) {
|
||||
System.out.println("Acknowledgment provided");
|
||||
acknowledgment.acknowledge();
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
|
||||
===== Example: Security Configuration
|
||||
|
||||
Apache Kafka 0.9 supports secure connections between client and brokers.
|
||||
To take advantage of this feature, follow the guidelines in the https://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 https://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
|
||||
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.
|
||||
|
||||
For example, to set `security.protocol` to `SASL_SSL`, set the following property:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
|
||||
----
|
||||
|
||||
All the other security properties can be set in a similar manner.
|
||||
|
||||
When using Kerberos, follow the instructions in the https://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.
|
||||
|
||||
Spring Cloud Stream supports passing JAAS configuration information to the application by using a JAAS configuration file and using Spring Boot properties.
|
||||
|
||||
====== Using JAAS Configuration Files
|
||||
|
||||
The JAAS and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
|
||||
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using a JAAS configuration file:
|
||||
|
||||
[source,bash]
|
||||
----
|
||||
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
|
||||
--spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
|
||||
--spring.cloud.stream.bindings.input.destination=stream.ticktock \
|
||||
--spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
|
||||
----
|
||||
|
||||
====== Using Spring Boot Properties
|
||||
|
||||
As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications by using Spring Boot properties.
|
||||
|
||||
The following properties can be used to configure the login context of the Kafka client:
|
||||
|
||||
spring.cloud.stream.kafka.binder.jaas.loginModule::
|
||||
The login module name. Not necessary to be set in normal cases.
|
||||
+
|
||||
Default: `com.sun.security.auth.module.Krb5LoginModule`.
|
||||
spring.cloud.stream.kafka.binder.jaas.controlFlag::
|
||||
The control flag of the login module.
|
||||
+
|
||||
Default: `required`.
|
||||
spring.cloud.stream.kafka.binder.jaas.options::
|
||||
Map with a key/value pair containing the login module options.
|
||||
+
|
||||
Default: Empty map.
|
||||
|
||||
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using Spring Boot configuration properties:
|
||||
|
||||
[source,bash]
|
||||
----
|
||||
java --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
|
||||
--spring.cloud.stream.bindings.input.destination=stream.ticktock \
|
||||
--spring.cloud.stream.kafka.binder.autoCreateTopics=false \
|
||||
--spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.storeKey=true \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
|
||||
----
|
||||
|
||||
The preceding example represents the equivalent of the following JAAS file:
|
||||
|
||||
[source]
|
||||
----
|
||||
KafkaClient {
|
||||
com.sun.security.auth.module.Krb5LoginModule required
|
||||
useKeyTab=true
|
||||
storeKey=true
|
||||
keyTab="/etc/security/keytabs/kafka_client.keytab"
|
||||
principal="kafka-client-1@EXAMPLE.COM";
|
||||
};
|
||||
----
|
||||
|
||||
If the topics required already exist on the broker or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be sent.
|
||||
|
||||
NOTE: Do not mix JAAS configuration files and Spring Boot properties in the same application.
|
||||
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream ignores the Spring Boot properties.
|
||||
|
||||
NOTE: Be careful when using the `autoCreateTopics` and `autoAddPartitions` with Kerberos.
|
||||
Usually, applications may use principals that do not have administrative rights in Kafka and Zookeeper.
|
||||
Consequently, relying on Spring Cloud Stream to create/modify topics may fail.
|
||||
In secure environments, we strongly recommend creating topics and managing ACLs administratively by using Kafka tooling.
|
||||
|
||||
[[pause-resume]]
|
||||
===== Example: Pausing and Resuming the Consumer
|
||||
|
||||
If you wish to suspend consumption but not cause a partition rebalance, you can pause and resume the consumer.
|
||||
This is facilitated by adding the `Consumer` as a parameter to your `@StreamListener`.
|
||||
To resume, you need an `ApplicationListener` for `ListenerContainerIdleEvent` instances.
|
||||
The frequency at which events are published is controlled by the `idleEventInterval` property.
|
||||
Since the consumer is not thread-safe, you must call these methods on the calling thread.
|
||||
|
||||
The following simple application shows how to pause and resume:
|
||||
|
||||
[source, java]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class Application {
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(Application.class, args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void in(String in, @Header(KafkaHeaders.CONSUMER) Consumer<?, ?> consumer) {
|
||||
System.out.println(in);
|
||||
consumer.pause(Collections.singleton(new TopicPartition("myTopic", 0)));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ApplicationListener<ListenerContainerIdleEvent> idleListener() {
|
||||
return event -> {
|
||||
System.out.println(event);
|
||||
if (event.getConsumer().paused().size() > 0) {
|
||||
event.getConsumer().resume(event.getConsumer().paused());
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
|
||||
[[kafka-transactional-binder]]
|
||||
=== Transactional Binder
|
||||
|
||||
Enable transactions by setting `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` to a non-empty value, e.g. `tx-`.
|
||||
When used in a processor application, the consumer starts the transaction; any records sent on the consumer thread participate in the same transaction.
|
||||
When the listener exits normally, the listener container will send the offset to the transaction and commit it.
|
||||
A common producer factory is used for all producer bindings configured using `spring.cloud.stream.kafka.binder.transaction.producer.*` properties; individual binding Kafka producer properties are ignored.
|
||||
|
||||
If you wish to use transactions in a source application, or from some arbitrary thread for producer-only transaction (e.g. `@Scheduled` method), you must get a reference to the transactional producer factory and define a `KafkaTransactionManager` bean using it.
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@Bean
|
||||
public PlatformTransactionManager transactionManager(BinderFactory binders) {
|
||||
ProducerFactory<byte[], byte[]> pf = ((KafkaMessageChannelBinder) binders.getBinder(null,
|
||||
MessageChannel.class)).getTransactionalProducerFactory();
|
||||
return new KafkaTransactionManager<>(pf);
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
Notice that we get a reference to the binder using the `BinderFactory`; use `null` in the first argument when there is only one binder configured.
|
||||
If more than one binder is configured, use the binder name to get the reference.
|
||||
Once we have a reference to the binder, we can obtain a reference to the `ProducerFactory` and create a transaction manager.
|
||||
|
||||
Then you would use normal Spring transaction support, e.g. `TransactionTemplate` or `@Transactional`, for example:
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
public static class Sender {
|
||||
|
||||
@Transactional
|
||||
public void doInTransaction(MessageChannel output, List<String> stuffToSend) {
|
||||
stuffToSend.forEach(stuff -> output.send(new GenericMessage<>(stuff)));
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
If you wish to synchronize producer-only transactions with those from some other transaction manager, use a `ChainedTransactionManager`.
|
||||
|
||||
[[kafka-error-channels]]
|
||||
=== Error Channels
|
||||
|
||||
Starting with version 1.3, the binder unconditionally sends exceptions to an error channel for each consumer destination and can also be configured to send async producer send failures to an error channel.
|
||||
See <<spring-cloud-stream-overview-error-handling>> for more information.
|
||||
|
||||
The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with properties:
|
||||
|
||||
* `failedMessage`: The Spring Messaging `Message<?>` that failed to be sent.
|
||||
* `record`: The raw `ProducerRecord` that was created from the `failedMessage`
|
||||
|
||||
There is no automatic handling of producer exceptions (such as sending to a <<kafka-dlq-processing, Dead-Letter queue>>).
|
||||
You can consume these exceptions with your own Spring Integration flow.
|
||||
|
||||
[[kafka-metrics]]
|
||||
=== Kafka Metrics
|
||||
|
||||
Kafka binder module exposes the following metrics:
|
||||
|
||||
`spring.cloud.stream.binder.kafka.offset`: This metric indicates how many messages have not been yet consumed from a given binder's topic by a given consumer group.
|
||||
The metrics provided are based on the Micrometer metrics library. The metric contains the consumer group information, topic and the actual lag in committed offset from the latest offset on the topic.
|
||||
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform.
|
||||
|
||||
[[kafka-tombstones]]
|
||||
=== Tombstone Records (null record values)
|
||||
|
||||
When using compacted topics, a record with a `null` value (also called a tombstone record) represents the deletion of a key.
|
||||
To receive such messages in a `@StreamListener` method, the parameter must be marked as not required to receive a `null` value argument.
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void in(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) byte[] key,
|
||||
@Payload(required = false) Customer customer) {
|
||||
// customer is null if a tombstone record
|
||||
...
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
[[rebalance-listener]]
|
||||
=== Using a KafkaRebalanceListener
|
||||
|
||||
Applications may wish to seek topics/partitions to arbitrary offsets when the partitions are initially assigned, or perform other operations on the consumer.
|
||||
Starting with version 2.1, if you provide a single `KafkaRebalanceListener` bean in the application context, it will be wired into all Kafka consumer bindings.
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
public interface KafkaBindingRebalanceListener {
|
||||
|
||||
/**
|
||||
* Invoked by the container before any pending offsets are committed.
|
||||
* @param bindingName the name of the binding.
|
||||
* @param consumer the consumer.
|
||||
* @param partitions the partitions.
|
||||
*/
|
||||
default void onPartitionsRevokedBeforeCommit(String bindingName, Consumer<?, ?> consumer,
|
||||
Collection<TopicPartition> partitions) {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Invoked by the container after any pending offsets are committed.
|
||||
* @param bindingName the name of the binding.
|
||||
* @param consumer the consumer.
|
||||
* @param partitions the partitions.
|
||||
*/
|
||||
default void onPartitionsRevokedAfterCommit(String bindingName, Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Invoked when partitions are initially assigned or after a rebalance.
|
||||
* Applications might only want to perform seek operations on an initial assignment.
|
||||
* @param bindingName the name of the binding.
|
||||
* @param consumer the consumer.
|
||||
* @param partitions the partitions.
|
||||
* @param initial true if this is the initial assignment.
|
||||
*/
|
||||
default void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer, Collection<TopicPartition> partitions,
|
||||
boolean initial) {
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
You cannot set the `resetOffsets` consumer property to `true` when you provide a rebalance listener.
|
||||
|
||||
= Appendices
|
||||
[appendix]
|
||||
[[building]]
|
||||
== Building
|
||||
|
||||
:jdkversion: 1.7
|
||||
|
||||
=== Basic Compile and Test
|
||||
|
||||
To build the source you will need to install JDK {jdkversion}.
|
||||
|
||||
The build uses the Maven wrapper so you don't have to install a specific
|
||||
version of Maven. To enable the tests, you should have Kafka server 0.9 or above running
|
||||
before building. See below for more information on running the servers.
|
||||
|
||||
The main build command is
|
||||
|
||||
----
|
||||
$ ./mvnw clean install
|
||||
----
|
||||
|
||||
You can also add '-DskipTests' if you like, to avoid running the tests.
|
||||
|
||||
NOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command
|
||||
in place of `./mvnw` in the examples below. If you do that you also
|
||||
might need to add `-P spring` if your local Maven settings do not
|
||||
contain repository declarations for spring pre-release artifacts.
|
||||
|
||||
NOTE: Be aware that you might need to increase the amount of memory
|
||||
available to Maven by setting a `MAVEN_OPTS` environment variable with
|
||||
a value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in
|
||||
the `.mvn` configuration, so if you find you have to do it to make a
|
||||
build succeed, please raise a ticket to get the settings added to
|
||||
source control.
|
||||
|
||||
|
||||
The projects that require middleware generally include a
|
||||
`docker-compose.yml`, so consider using
|
||||
https://docs.docker.com/compose/[Docker Compose] to run the middleware servers
|
||||
in Docker containers.
|
||||
|
||||
=== Documentation
|
||||
|
||||
There is a "full" profile that will generate documentation.
|
||||
|
||||
=== Working with the code
|
||||
If you don't have an IDE preference we would recommend that you use
|
||||
https://www.springsource.com/developer/sts[Spring Tools Suite] or
|
||||
https://eclipse.org[Eclipse] when working with the code. We use the
|
||||
https://eclipse.org/m2e/[m2eclipse] eclipse plugin for maven support. Other IDEs and tools
|
||||
should also work without issue.
|
||||
|
||||
==== Importing into eclipse with m2eclipse
|
||||
We recommend the https://eclipse.org/m2e/[m2eclipse] eclipse plugin when working with
|
||||
eclipse. If you don't already have m2eclipse installed it is available from the "eclipse
|
||||
marketplace".
|
||||
|
||||
Unfortunately m2e does not yet support Maven 3.3, so once the projects
|
||||
are imported into Eclipse you will also need to tell m2eclipse to use
|
||||
the `.settings.xml` file for the projects. If you do not do this you
|
||||
may see many different errors related to the POMs in the
|
||||
projects. Open your Eclipse preferences, expand the Maven
|
||||
preferences, and select User Settings. In the User Settings field
|
||||
click Browse and navigate to the Spring Cloud project you imported
|
||||
selecting the `.settings.xml` file in that project. Click Apply and
|
||||
then OK to save the preference changes.
|
||||
|
||||
NOTE: Alternatively you can copy the repository settings from https://github.com/spring-cloud/spring-cloud-build/blob/master/.settings.xml[`.settings.xml`] into your own `~/.m2/settings.xml`.
|
||||
|
||||
==== Importing into eclipse without m2eclipse
|
||||
If you prefer not to use m2eclipse you can generate eclipse project metadata using the
|
||||
following command:
|
||||
|
||||
[indent=0]
|
||||
----
|
||||
$ ./mvnw eclipse:eclipse
|
||||
----
|
||||
|
||||
The generated eclipse projects can be imported by selecting `import existing projects`
|
||||
from the `file` menu.
|
||||
[[contributing]]
|
||||
== Contributing
|
||||
|
||||
Spring Cloud is released under the non-restrictive Apache 2.0 license,
|
||||
and follows a very standard GitHub development process, using GitHub
|
||||
tracker for issues and merging pull requests into master. If you want
|
||||
to contribute even something trivial please do not hesitate, but
|
||||
follow the guidelines below.
|
||||
|
||||
=== Sign the Contributor License Agreement
|
||||
Before we accept a non-trivial patch or pull request we will need you to sign the
|
||||
https://support.springsource.com/spring_committer_signup[contributor's agreement].
|
||||
Signing the contributor's agreement does not grant anyone commit rights to the main
|
||||
repository, but it does mean that we can accept your contributions, and you will get an
|
||||
author credit if we do. Active contributors might be asked to join the core team, and
|
||||
given the ability to merge pull requests.
|
||||
|
||||
=== Code Conventions and Housekeeping
|
||||
None of these is essential for a pull request, but they will all help. They can also be
|
||||
added after the original pull request but before a merge.
|
||||
|
||||
* Use the Spring Framework code format conventions. If you use Eclipse
|
||||
you can import formatter settings using the
|
||||
`eclipse-code-formatter.xml` file from the
|
||||
https://github.com/spring-cloud/build/tree/master/eclipse-coding-conventions.xml[Spring
|
||||
Cloud Build] project. If using IntelliJ, you can use the
|
||||
https://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
|
||||
Plugin] to import the same file.
|
||||
* Make sure all new `.java` files have a simple Javadoc class comment with at least an
|
||||
`@author` tag identifying you, and preferably at least a paragraph on what the class is
|
||||
for.
|
||||
* Add the ASF license header comment to all new `.java` files (copy from existing files
|
||||
in the project)
|
||||
* Add yourself as an `@author` to the .java files that you modify substantially (more
|
||||
than cosmetic changes).
|
||||
* Add some Javadocs and, if you change the namespace, some XSD doc elements.
|
||||
* A few unit tests would help a lot as well -- someone has to do it.
|
||||
* If no-one else is using your branch, please rebase it against the current master (or
|
||||
other target branch in the main project).
|
||||
* When writing a commit message please follow https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions],
|
||||
if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit
|
||||
message (where XXXX is the issue number).
|
||||
|
||||
// ======================================================================================
|
||||
Spring Cloud Stream Binder implementation for Kafka
|
||||
|
||||
67
docs/pom.xml
67
docs/pom.xml
@@ -1,67 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>3.0.0.RC2</version>
|
||||
</parent>
|
||||
<packaging>pom</packaging>
|
||||
<name>spring-cloud-stream-binder-kafka-docs</name>
|
||||
<description>Spring Cloud Stream Kafka Binder Docs</description>
|
||||
<properties>
|
||||
<docs.main>spring-cloud-stream-binder-kafka</docs.main>
|
||||
<main.basedir>${basedir}/..</main.basedir>
|
||||
<maven.plugin.plugin.version>3.4</maven.plugin.plugin.version>
|
||||
</properties>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<artifactId>maven-deploy-plugin</artifactId>
|
||||
<version>2.8.2</version>
|
||||
<configuration>
|
||||
<skip>true</skip>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>docs</id>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>pl.project13.maven</groupId>
|
||||
<artifactId>git-commit-id-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-dependency-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-resources-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.asciidoctor</groupId>
|
||||
<artifactId>asciidoctor-maven-plugin</artifactId>
|
||||
<version>${asciidoctor-maven-plugin.version}</version>
|
||||
<configuration>
|
||||
<sourceDirectory>${project.build.directory}/refdocs/</sourceDirectory>
|
||||
<attributes>
|
||||
<spring-cloud-stream-version>${project.version}</spring-cloud-stream-version>
|
||||
</attributes>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-antrun-plugin</artifactId>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</profile>
|
||||
</profiles>
|
||||
</project>
|
||||
2
docs/src/main/asciidoc/.gitignore
vendored
2
docs/src/main/asciidoc/.gitignore
vendored
@@ -1,2 +0,0 @@
|
||||
*.html
|
||||
*.css
|
||||
@@ -1,20 +0,0 @@
|
||||
require 'asciidoctor'
|
||||
require 'erb'
|
||||
|
||||
guard 'shell' do
|
||||
watch(/.*\.adoc$/) {|m|
|
||||
Asciidoctor.render_file('index.adoc', \
|
||||
:in_place => true, \
|
||||
:safe => Asciidoctor::SafeMode::UNSAFE, \
|
||||
:attributes=> { \
|
||||
'source-highlighter' => 'prettify', \
|
||||
'icons' => 'font', \
|
||||
'linkcss'=> 'true', \
|
||||
'copycss' => 'true', \
|
||||
'doctype' => 'book'})
|
||||
}
|
||||
end
|
||||
|
||||
guard 'livereload' do
|
||||
watch(%r{^.+\.(css|js|html)$})
|
||||
end
|
||||
@@ -1,22 +0,0 @@
|
||||
:jdkversion: 1.8
|
||||
:github-tag: master
|
||||
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
|
||||
|
||||
:github-raw: https://raw.githubusercontent.com/{github-repo}/{github-tag}
|
||||
:github-code: https://github.com/{github-repo}/tree/{github-tag}
|
||||
|
||||
image::https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka.svg?style=svg["CircleCI", link="https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
|
||||
image::https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka/branch/{github-tag}/graph/badge.svg["codecov", link="https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
|
||||
image::https://badges.gitter.im/spring-cloud/spring-cloud-stream-binder-kafka.svg[Gitter, link="https://gitter.im/spring-cloud/spring-cloud-stream-binder-kafka?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge"]
|
||||
|
||||
// ======================================================================================
|
||||
|
||||
//= Overview
|
||||
include::overview.adoc[]
|
||||
|
||||
= Appendices
|
||||
[appendix]
|
||||
include::building.adoc[]
|
||||
include::contributing.adoc[]
|
||||
|
||||
// ======================================================================================
|
||||
@@ -1,5 +0,0 @@
|
||||
[[appendix]]
|
||||
= Appendices
|
||||
|
||||
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
[[building]]
|
||||
== Building
|
||||
|
||||
:jdkversion: 1.7
|
||||
|
||||
=== Basic Compile and Test
|
||||
|
||||
To build the source you will need to install JDK {jdkversion}.
|
||||
|
||||
The build uses the Maven wrapper so you don't have to install a specific
|
||||
version of Maven. To enable the tests, you should have Kafka server 0.9 or above running
|
||||
before building. See below for more information on running the servers.
|
||||
|
||||
The main build command is
|
||||
|
||||
----
|
||||
$ ./mvnw clean install
|
||||
----
|
||||
|
||||
You can also add '-DskipTests' if you like, to avoid running the tests.
|
||||
|
||||
NOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command
|
||||
in place of `./mvnw` in the examples below. If you do that you also
|
||||
might need to add `-P spring` if your local Maven settings do not
|
||||
contain repository declarations for spring pre-release artifacts.
|
||||
|
||||
NOTE: Be aware that you might need to increase the amount of memory
|
||||
available to Maven by setting a `MAVEN_OPTS` environment variable with
|
||||
a value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in
|
||||
the `.mvn` configuration, so if you find you have to do it to make a
|
||||
build succeed, please raise a ticket to get the settings added to
|
||||
source control.
|
||||
|
||||
|
||||
The projects that require middleware generally include a
|
||||
`docker-compose.yml`, so consider using
|
||||
https://docs.docker.com/compose/[Docker Compose] to run the middleware servers
|
||||
in Docker containers.
|
||||
|
||||
=== Documentation
|
||||
|
||||
There is a "full" profile that will generate documentation.
|
||||
|
||||
=== Working with the code
|
||||
If you don't have an IDE preference we would recommend that you use
|
||||
https://www.springsource.com/developer/sts[Spring Tools Suite] or
|
||||
https://eclipse.org[Eclipse] when working with the code. We use the
|
||||
https://eclipse.org/m2e/[m2eclipse] eclipse plugin for maven support. Other IDEs and tools
|
||||
should also work without issue.
|
||||
|
||||
==== Importing into eclipse with m2eclipse
|
||||
We recommend the https://eclipse.org/m2e/[m2eclipse] eclipse plugin when working with
|
||||
eclipse. If you don't already have m2eclipse installed it is available from the "eclipse
|
||||
marketplace".
|
||||
|
||||
Unfortunately m2e does not yet support Maven 3.3, so once the projects
|
||||
are imported into Eclipse you will also need to tell m2eclipse to use
|
||||
the `.settings.xml` file for the projects. If you do not do this you
|
||||
may see many different errors related to the POMs in the
|
||||
projects. Open your Eclipse preferences, expand the Maven
|
||||
preferences, and select User Settings. In the User Settings field
|
||||
click Browse and navigate to the Spring Cloud project you imported
|
||||
selecting the `.settings.xml` file in that project. Click Apply and
|
||||
then OK to save the preference changes.
|
||||
|
||||
NOTE: Alternatively you can copy the repository settings from https://github.com/spring-cloud/spring-cloud-build/blob/master/.settings.xml[`.settings.xml`] into your own `~/.m2/settings.xml`.
|
||||
|
||||
==== Importing into eclipse without m2eclipse
|
||||
If you prefer not to use m2eclipse you can generate eclipse project metadata using the
|
||||
following command:
|
||||
|
||||
[indent=0]
|
||||
----
|
||||
$ ./mvnw eclipse:eclipse
|
||||
----
|
||||
|
||||
The generated eclipse projects can be imported by selecting `import existing projects`
|
||||
from the `file` menu.
|
||||
@@ -1,42 +0,0 @@
|
||||
[[contributing]]
|
||||
== Contributing
|
||||
|
||||
Spring Cloud is released under the non-restrictive Apache 2.0 license,
|
||||
and follows a very standard GitHub development process, using GitHub
|
||||
tracker for issues and merging pull requests into master. If you want
|
||||
to contribute even something trivial please do not hesitate, but
|
||||
follow the guidelines below.
|
||||
|
||||
=== Sign the Contributor License Agreement
|
||||
Before we accept a non-trivial patch or pull request we will need you to sign the
|
||||
https://support.springsource.com/spring_committer_signup[contributor's agreement].
|
||||
Signing the contributor's agreement does not grant anyone commit rights to the main
|
||||
repository, but it does mean that we can accept your contributions, and you will get an
|
||||
author credit if we do. Active contributors might be asked to join the core team, and
|
||||
given the ability to merge pull requests.
|
||||
|
||||
=== Code Conventions and Housekeeping
|
||||
None of these is essential for a pull request, but they will all help. They can also be
|
||||
added after the original pull request but before a merge.
|
||||
|
||||
* Use the Spring Framework code format conventions. If you use Eclipse
|
||||
you can import formatter settings using the
|
||||
`eclipse-code-formatter.xml` file from the
|
||||
https://github.com/spring-cloud/build/tree/master/eclipse-coding-conventions.xml[Spring
|
||||
Cloud Build] project. If using IntelliJ, you can use the
|
||||
https://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
|
||||
Plugin] to import the same file.
|
||||
* Make sure all new `.java` files have a simple Javadoc class comment with at least an
|
||||
`@author` tag identifying you, and preferably at least a paragraph on what the class is
|
||||
for.
|
||||
* Add the ASF license header comment to all new `.java` files (copy from existing files
|
||||
in the project)
|
||||
* Add yourself as an `@author` to the .java files that you modify substantially (more
|
||||
than cosmetic changes).
|
||||
* Add some Javadocs and, if you change the namespace, some XSD doc elements.
|
||||
* A few unit tests would help a lot as well -- someone has to do it.
|
||||
* If no-one else is using your branch, please rebase it against the current master (or
|
||||
other target branch in the main project).
|
||||
* When writing a commit message please follow https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions],
|
||||
if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit
|
||||
message (where XXXX is the issue number).
|
||||
@@ -1,132 +0,0 @@
|
||||
[[kafka-dlq-processing]]
|
||||
=== Dead-Letter Topic Processing
|
||||
|
||||
[[dlq-partition-selection]]
|
||||
==== Dead-Letter Topic Partition Selection
|
||||
|
||||
By default, records are published to the Dead-Letter topic using the same partition as the original record.
|
||||
This means the Dead-Letter topic must have at least as many partitions as the original record.
|
||||
|
||||
To change this behavior, add a `DlqPartitionFunction` implementation as a `@Bean` to the application context.
|
||||
Only one such bean can be present.
|
||||
The function is provided with the consumer group, the failed `ConsumerRecord` and the exception.
|
||||
For example, if you always want to route to partition 0, you might use:
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@Bean
|
||||
public DlqPartitionFunction partitionFunction() {
|
||||
return (group, record, ex) -> 0;
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
NOTE: If you set a consumer binding's `dlqPartitions` property to 1 (and the binder's `minPartitionCount` is equal to `1`), there is no need to supply a `DlqPartitionFunction`; the framework will always use partition 0.
|
||||
If you set a consumer binding's `dlqPartitions` property to a value greater than `1` (or the binder's `minPartitionCount` is greater than `1`), you **must** provide a `DlqPartitionFunction` bean, even if the partition count is the same as the original topic's.
|
||||
|
||||
[[dlq-handling]]
|
||||
==== Handling Records in a Dead-Letter Topic
|
||||
|
||||
Because the framework cannot anticipate how users would want to dispose of dead-lettered messages, it does not provide any standard mechanism to handle them.
|
||||
If the reason for the dead-lettering is transient, you may wish to route the messages back to the original topic.
|
||||
However, if the problem is a permanent issue, that could cause an infinite loop.
|
||||
The sample Spring Boot application within this topic is an example of how to route those messages back to the original topic, but it moves them to a "`parking lot`" topic after three attempts.
|
||||
The application is another spring-cloud-stream application that reads from the dead-letter topic.
|
||||
It terminates when no messages are received for 5 seconds.
|
||||
|
||||
The examples assume the original destination is `so8400out` and the consumer group is `so8400`.
|
||||
|
||||
There are a couple of strategies to consider:
|
||||
|
||||
* Consider running the rerouting only when the main application is not running.
|
||||
Otherwise, the retries for transient errors are used up very quickly.
|
||||
* Alternatively, use a two-stage approach: Use this application to route to a third topic and another to route from there back to the main topic.
|
||||
|
||||
The following code listings show the sample application:
|
||||
|
||||
.application.properties
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.input.group=so8400replay
|
||||
spring.cloud.stream.bindings.input.destination=error.so8400out.so8400
|
||||
|
||||
spring.cloud.stream.bindings.output.destination=so8400out
|
||||
|
||||
spring.cloud.stream.bindings.parkingLot.destination=so8400in.parkingLot
|
||||
|
||||
spring.cloud.stream.kafka.binder.configuration.auto.offset.reset=earliest
|
||||
|
||||
spring.cloud.stream.kafka.binder.headers=x-retries
|
||||
----
|
||||
|
||||
.Application
|
||||
[source, java]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(TwoOutputProcessor.class)
|
||||
public class ReRouteDlqKApplication implements CommandLineRunner {
|
||||
|
||||
private static final String X_RETRIES_HEADER = "x-retries";
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(ReRouteDlqKApplication.class, args).close();
|
||||
}
|
||||
|
||||
private final AtomicInteger processed = new AtomicInteger();
|
||||
|
||||
@Autowired
|
||||
private MessageChannel parkingLot;
|
||||
|
||||
@StreamListener(Processor.INPUT)
|
||||
@SendTo(Processor.OUTPUT)
|
||||
public Message<?> reRoute(Message<?> failed) {
|
||||
processed.incrementAndGet();
|
||||
Integer retries = failed.getHeaders().get(X_RETRIES_HEADER, Integer.class);
|
||||
if (retries == null) {
|
||||
System.out.println("First retry for " + failed);
|
||||
return MessageBuilder.fromMessage(failed)
|
||||
.setHeader(X_RETRIES_HEADER, new Integer(1))
|
||||
.setHeader(BinderHeaders.PARTITION_OVERRIDE,
|
||||
failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
|
||||
.build();
|
||||
}
|
||||
else if (retries.intValue() < 3) {
|
||||
System.out.println("Another retry for " + failed);
|
||||
return MessageBuilder.fromMessage(failed)
|
||||
.setHeader(X_RETRIES_HEADER, new Integer(retries.intValue() + 1))
|
||||
.setHeader(BinderHeaders.PARTITION_OVERRIDE,
|
||||
failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
|
||||
.build();
|
||||
}
|
||||
else {
|
||||
System.out.println("Retries exhausted for " + failed);
|
||||
parkingLot.send(MessageBuilder.fromMessage(failed)
|
||||
.setHeader(BinderHeaders.PARTITION_OVERRIDE,
|
||||
failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
|
||||
.build());
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run(String... args) throws Exception {
|
||||
while (true) {
|
||||
int count = this.processed.get();
|
||||
Thread.sleep(5000);
|
||||
if (count == this.processed.get()) {
|
||||
System.out.println("Idle, terminating");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public interface TwoOutputProcessor extends Processor {
|
||||
|
||||
@Output("parkingLot")
|
||||
MessageChannel parkingLot();
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
@@ -1,330 +0,0 @@
|
||||
#!/bin/bash -x
|
||||
|
||||
set -e
|
||||
|
||||
# Set default props like MAVEN_PATH, ROOT_FOLDER etc.
|
||||
function set_default_props() {
|
||||
# The script should be executed from the root folder
|
||||
ROOT_FOLDER=`pwd`
|
||||
echo "Current folder is ${ROOT_FOLDER}"
|
||||
|
||||
if [[ ! -e "${ROOT_FOLDER}/.git" ]]; then
|
||||
echo "You're not in the root folder of the project!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Prop that will let commit the changes
|
||||
COMMIT_CHANGES="no"
|
||||
MAVEN_PATH=${MAVEN_PATH:-}
|
||||
echo "Path to Maven is [${MAVEN_PATH}]"
|
||||
REPO_NAME=${PWD##*/}
|
||||
echo "Repo name is [${REPO_NAME}]"
|
||||
SPRING_CLOUD_STATIC_REPO=${SPRING_CLOUD_STATIC_REPO:-git@github.com:spring-cloud/spring-cloud-static.git}
|
||||
echo "Spring Cloud Static repo is [${SPRING_CLOUD_STATIC_REPO}]"
|
||||
}
|
||||
|
||||
# Check if gh-pages exists and docs have been built
|
||||
function check_if_anything_to_sync() {
|
||||
git remote set-url --push origin `git config remote.origin.url | sed -e 's/^git:/https:/'`
|
||||
|
||||
if ! (git remote set-branches --add origin gh-pages && git fetch -q); then
|
||||
echo "No gh-pages, so not syncing"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if ! [ -d docs/target/generated-docs ] && ! [ "${BUILD}" == "yes" ]; then
|
||||
echo "No gh-pages sources in docs/target/generated-docs, so not syncing"
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
function retrieve_current_branch() {
|
||||
# Code getting the name of the current branch. For master we want to publish as we did until now
|
||||
# https://stackoverflow.com/questions/1593051/how-to-programmatically-determine-the-current-checked-out-git-branch
|
||||
# If there is a branch already passed will reuse it - otherwise will try to find it
|
||||
CURRENT_BRANCH=${BRANCH}
|
||||
if [[ -z "${CURRENT_BRANCH}" ]] ; then
|
||||
CURRENT_BRANCH=$(git symbolic-ref -q HEAD)
|
||||
CURRENT_BRANCH=${CURRENT_BRANCH##refs/heads/}
|
||||
CURRENT_BRANCH=${CURRENT_BRANCH:-HEAD}
|
||||
fi
|
||||
echo "Current branch is [${CURRENT_BRANCH}]"
|
||||
git checkout ${CURRENT_BRANCH} || echo "Failed to check the branch... continuing with the script"
|
||||
}
|
||||
|
||||
# Switches to the provided value of the release version. We always prefix it with `v`
|
||||
function switch_to_tag() {
|
||||
git checkout v${VERSION}
|
||||
}
|
||||
|
||||
# Build the docs if switch is on
|
||||
function build_docs_if_applicable() {
|
||||
if [[ "${BUILD}" == "yes" ]] ; then
|
||||
./mvnw clean install -P docs -pl docs -DskipTests
|
||||
fi
|
||||
}
|
||||
|
||||
# Get the name of the `docs.main` property
|
||||
# Get whitelisted branches - assumes that a `docs` module is available under `docs` profile
|
||||
function retrieve_doc_properties() {
|
||||
MAIN_ADOC_VALUE=$("${MAVEN_PATH}"mvn -q \
|
||||
-Dexec.executable="echo" \
|
||||
-Dexec.args='${docs.main}' \
|
||||
--non-recursive \
|
||||
org.codehaus.mojo:exec-maven-plugin:1.3.1:exec)
|
||||
echo "Extracted 'main.adoc' from Maven build [${MAIN_ADOC_VALUE}]"
|
||||
|
||||
|
||||
WHITELIST_PROPERTY=${WHITELIST_PROPERTY:-"docs.whitelisted.branches"}
|
||||
WHITELISTED_BRANCHES_VALUE=$("${MAVEN_PATH}"mvn -q \
|
||||
-Dexec.executable="echo" \
|
||||
-Dexec.args="\${${WHITELIST_PROPERTY}}" \
|
||||
org.codehaus.mojo:exec-maven-plugin:1.3.1:exec \
|
||||
-P docs \
|
||||
-pl docs)
|
||||
echo "Extracted '${WHITELIST_PROPERTY}' from Maven build [${WHITELISTED_BRANCHES_VALUE}]"
|
||||
}
|
||||
|
||||
# Stash any outstanding changes
|
||||
function stash_changes() {
|
||||
git diff-index --quiet HEAD && dirty=$? || (echo "Failed to check if the current repo is dirty. Assuming that it is." && dirty="1")
|
||||
if [ "$dirty" != "0" ]; then git stash; fi
|
||||
}
|
||||
|
||||
# Switch to gh-pages branch to sync it with current branch
|
||||
function add_docs_from_target() {
|
||||
local DESTINATION_REPO_FOLDER
|
||||
if [[ -z "${DESTINATION}" && -z "${CLONE}" ]] ; then
|
||||
DESTINATION_REPO_FOLDER=${ROOT_FOLDER}
|
||||
elif [[ "${CLONE}" == "yes" ]]; then
|
||||
mkdir -p ${ROOT_FOLDER}/target
|
||||
local clonedStatic=${ROOT_FOLDER}/target/spring-cloud-static
|
||||
if [[ ! -e "${clonedStatic}/.git" ]]; then
|
||||
echo "Cloning Spring Cloud Static to target"
|
||||
git clone ${SPRING_CLOUD_STATIC_REPO} ${clonedStatic} && git checkout gh-pages
|
||||
else
|
||||
echo "Spring Cloud Static already cloned - will pull changes"
|
||||
cd ${clonedStatic} && git checkout gh-pages && git pull origin gh-pages
|
||||
fi
|
||||
DESTINATION_REPO_FOLDER=${clonedStatic}/${REPO_NAME}
|
||||
mkdir -p ${DESTINATION_REPO_FOLDER}
|
||||
else
|
||||
if [[ ! -e "${DESTINATION}/.git" ]]; then
|
||||
echo "[${DESTINATION}] is not a git repository"
|
||||
exit 1
|
||||
fi
|
||||
DESTINATION_REPO_FOLDER=${DESTINATION}/${REPO_NAME}
|
||||
mkdir -p ${DESTINATION_REPO_FOLDER}
|
||||
echo "Destination was provided [${DESTINATION}]"
|
||||
fi
|
||||
cd ${DESTINATION_REPO_FOLDER}
|
||||
git checkout gh-pages
|
||||
git pull origin gh-pages
|
||||
|
||||
# Add git branches
|
||||
###################################################################
|
||||
if [[ -z "${VERSION}" ]] ; then
|
||||
copy_docs_for_current_version
|
||||
else
|
||||
copy_docs_for_provided_version
|
||||
fi
|
||||
commit_changes_if_applicable
|
||||
}
|
||||
|
||||
|
||||
# Copies the docs by using the retrieved properties from Maven build
|
||||
function copy_docs_for_current_version() {
|
||||
if [[ "${CURRENT_BRANCH}" == "master" ]] ; then
|
||||
echo -e "Current branch is master - will copy the current docs only to the root folder"
|
||||
for f in docs/target/generated-docs/*; do
|
||||
file=${f#docs/target/generated-docs/*}
|
||||
if ! git ls-files -i -o --exclude-standard --directory | grep -q ^$file$; then
|
||||
# Not ignored...
|
||||
cp -rf $f ${ROOT_FOLDER}/
|
||||
git add -A ${ROOT_FOLDER}/$file
|
||||
fi
|
||||
done
|
||||
COMMIT_CHANGES="yes"
|
||||
else
|
||||
echo -e "Current branch is [${CURRENT_BRANCH}]"
|
||||
# https://stackoverflow.com/questions/29300806/a-bash-script-to-check-if-a-string-is-present-in-a-comma-separated-list-of-strin
|
||||
if [[ ",${WHITELISTED_BRANCHES_VALUE}," = *",${CURRENT_BRANCH},"* ]] ; then
|
||||
mkdir -p ${ROOT_FOLDER}/${CURRENT_BRANCH}
|
||||
echo -e "Branch [${CURRENT_BRANCH}] is whitelisted! Will copy the current docs to the [${CURRENT_BRANCH}] folder"
|
||||
for f in docs/target/generated-docs/*; do
|
||||
file=${f#docs/target/generated-docs/*}
|
||||
if ! git ls-files -i -o --exclude-standard --directory | grep -q ^$file$; then
|
||||
# Not ignored...
|
||||
# We want users to access 2.0.0.BUILD-SNAPSHOT/ instead of 1.0.0.RELEASE/spring-cloud.sleuth.html
|
||||
if [[ "${file}" == "${MAIN_ADOC_VALUE}.html" ]] ; then
|
||||
# We don't want to copy the spring-cloud-sleuth.html
|
||||
# we want it to be converted to index.html
|
||||
cp -rf $f ${ROOT_FOLDER}/${CURRENT_BRANCH}/index.html
|
||||
git add -A ${ROOT_FOLDER}/${CURRENT_BRANCH}/index.html
|
||||
else
|
||||
cp -rf $f ${ROOT_FOLDER}/${CURRENT_BRANCH}
|
||||
git add -A ${ROOT_FOLDER}/${CURRENT_BRANCH}/$file
|
||||
fi
|
||||
fi
|
||||
done
|
||||
COMMIT_CHANGES="yes"
|
||||
else
|
||||
echo -e "Branch [${CURRENT_BRANCH}] is not on the white list! Check out the Maven [${WHITELIST_PROPERTY}] property in
|
||||
[docs] module available under [docs] profile. Won't commit any changes to gh-pages for this branch."
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Copies the docs by using the explicitly provided version
|
||||
function copy_docs_for_provided_version() {
|
||||
local FOLDER=${DESTINATION_REPO_FOLDER}/${VERSION}
|
||||
mkdir -p ${FOLDER}
|
||||
echo -e "Current tag is [v${VERSION}] Will copy the current docs to the [${FOLDER}] folder"
|
||||
for f in ${ROOT_FOLDER}/docs/target/generated-docs/*; do
|
||||
file=${f#${ROOT_FOLDER}/docs/target/generated-docs/*}
|
||||
copy_docs_for_branch ${file} ${FOLDER}
|
||||
done
|
||||
COMMIT_CHANGES="yes"
|
||||
CURRENT_BRANCH="v${VERSION}"
|
||||
}
|
||||
|
||||
# Copies the docs from target to the provided destination
|
||||
# Params:
|
||||
# $1 - file from target
|
||||
# $2 - destination to which copy the files
|
||||
function copy_docs_for_branch() {
|
||||
local file=$1
|
||||
local destination=$2
|
||||
if ! git ls-files -i -o --exclude-standard --directory | grep -q ^${file}$; then
|
||||
# Not ignored...
|
||||
# We want users to access 2.0.0.BUILD-SNAPSHOT/ instead of 1.0.0.RELEASE/spring-cloud.sleuth.html
|
||||
if [[ ("${file}" == "${MAIN_ADOC_VALUE}.html") || ("${file}" == "${REPO_NAME}.html") ]] ; then
|
||||
# We don't want to copy the spring-cloud-sleuth.html
|
||||
# we want it to be converted to index.html
|
||||
cp -rf $f ${destination}/index.html
|
||||
git add -A ${destination}/index.html
|
||||
else
|
||||
cp -rf $f ${destination}
|
||||
git add -A ${destination}/$file
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function commit_changes_if_applicable() {
|
||||
if [[ "${COMMIT_CHANGES}" == "yes" ]] ; then
|
||||
COMMIT_SUCCESSFUL="no"
|
||||
git commit -a -m "Sync docs from ${CURRENT_BRANCH} to gh-pages" && COMMIT_SUCCESSFUL="yes" || echo "Failed to commit changes"
|
||||
|
||||
# Uncomment the following push if you want to auto push to
|
||||
# the gh-pages branch whenever you commit to master locally.
|
||||
# This is a little extreme. Use with care!
|
||||
###################################################################
|
||||
if [[ "${COMMIT_SUCCESSFUL}" == "yes" ]] ; then
|
||||
git push origin gh-pages
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Switch back to the previous branch and exit block
|
||||
function checkout_previous_branch() {
|
||||
# If -version was provided we need to come back to root project
|
||||
cd ${ROOT_FOLDER}
|
||||
git checkout ${CURRENT_BRANCH} || echo "Failed to check the branch... continuing with the script"
|
||||
if [ "$dirty" != "0" ]; then git stash pop; fi
|
||||
exit 0
|
||||
}
|
||||
|
||||
# Assert if properties have been properly passed
|
||||
function assert_properties() {
|
||||
echo "VERSION [${VERSION}], DESTINATION [${DESTINATION}], CLONE [${CLONE}]"
|
||||
if [[ "${VERSION}" != "" && (-z "${DESTINATION}" && -z "${CLONE}") ]] ; then echo "Version was set but destination / clone was not!"; exit 1;fi
|
||||
if [[ ("${DESTINATION}" != "" && "${CLONE}" != "") && -z "${VERSION}" ]] ; then echo "Destination / clone was set but version was not!"; exit 1;fi
|
||||
if [[ "${DESTINATION}" != "" && "${CLONE}" == "yes" ]] ; then echo "Destination and clone was set. Pick one!"; exit 1;fi
|
||||
}
|
||||
|
||||
# Prints the usage
|
||||
function print_usage() {
|
||||
cat <<EOF
|
||||
The idea of this script is to update gh-pages branch with the generated docs. Without any options
|
||||
the script will work in the following manner:
|
||||
|
||||
- if there's no gh-pages / target for docs module then the script ends
|
||||
- for master branch the generated docs are copied to the root of gh-pages branch
|
||||
- for any other branch (if that branch is whitelisted) a subfolder with branch name is created
|
||||
and docs are copied there
|
||||
- if the version switch is passed (-v) then a tag with (v) prefix will be retrieved and a folder
|
||||
with that version number will be created in the gh-pages branch. WARNING! No whitelist verification will take place
|
||||
- if the destination switch is passed (-d) then the script will check if the provided dir is a git repo and then will
|
||||
switch to gh-pages of that repo and copy the generated docs to `docs/<project-name>/<version>`
|
||||
- if the clone switch is passed (-c) then the script will automatically clone the spring-cloud-static repo and then will
|
||||
switch to gh-pages of that repo and copy the generated docs to `docs/<project-name>/<version>`
|
||||
|
||||
USAGE:
|
||||
|
||||
You can use the following options:
|
||||
|
||||
-v|--version - the script will apply the whole procedure for a particular library version
|
||||
-d|--destination - the root of destination folder where the docs should be copied. You have to use the full path.
|
||||
E.g. point to spring-cloud-static folder. Can't be used with (-c)
|
||||
-b|--build - will run the standard build process after checking out the branch
|
||||
-c|--clone - will automatically clone the spring-cloud-static repo instead of providing the destination.
|
||||
Obviously can't be used with (-d)
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
|
||||
# ==========================================
|
||||
# ____ ____ _____ _____ _____ _______
|
||||
# / ____|/ ____| __ \|_ _| __ \__ __|
|
||||
# | (___ | | | |__) | | | | |__) | | |
|
||||
# \___ \| | | _ / | | | ___/ | |
|
||||
# ____) | |____| | \ \ _| |_| | | |
|
||||
# |_____/ \_____|_| \_\_____|_| |_|
|
||||
#
|
||||
# ==========================================
|
||||
|
||||
while [[ $# > 0 ]]
|
||||
do
|
||||
key="$1"
|
||||
case ${key} in
|
||||
-v|--version)
|
||||
VERSION="$2"
|
||||
shift # past argument
|
||||
;;
|
||||
-d|--destination)
|
||||
DESTINATION="$2"
|
||||
shift # past argument
|
||||
;;
|
||||
-b|--build)
|
||||
BUILD="yes"
|
||||
;;
|
||||
-c|--clone)
|
||||
CLONE="yes"
|
||||
;;
|
||||
-h|--help)
|
||||
print_usage
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
echo "Invalid option: [$1]"
|
||||
print_usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
shift # past argument or value
|
||||
done
|
||||
|
||||
assert_properties
|
||||
set_default_props
|
||||
check_if_anything_to_sync
|
||||
if [[ -z "${VERSION}" ]] ; then
|
||||
retrieve_current_branch
|
||||
else
|
||||
switch_to_tag
|
||||
fi
|
||||
build_docs_if_applicable
|
||||
retrieve_doc_properties
|
||||
stash_changes
|
||||
add_docs_from_target
|
||||
checkout_previous_branch
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 9.2 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 119 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 117 KiB |
@@ -1,14 +0,0 @@
|
||||
<productname>Spring Cloud Stream Kafka Binder</productname>
|
||||
<releaseinfo>{spring-cloud-stream-binder-kafka-version}</releaseinfo>
|
||||
<copyright>
|
||||
<year>2013-2016</year>
|
||||
<holder>Pivotal Software, Inc.</holder>
|
||||
</copyright>
|
||||
<legalnotice>
|
||||
<para>
|
||||
Copies of this document may be made for your own use and for distribution to
|
||||
others, provided that you do not charge any fee for such copies and further
|
||||
provided that each copy contains this Copyright Notice, whether distributed in
|
||||
print or electronically.
|
||||
</para>
|
||||
</legalnotice>
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,690 +0,0 @@
|
||||
[partintro]
|
||||
--
|
||||
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
|
||||
It contains information about its design, usage, and configuration options, as well as information on how the Spring Cloud Stream concepts map onto Apache Kafka specific constructs.
|
||||
In addition, this guide explains the Kafka Streams binding capabilities of Spring Cloud Stream.
|
||||
--
|
||||
|
||||
== Apache Kafka Binder
|
||||
|
||||
=== Usage
|
||||
|
||||
To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
=== Overview
|
||||
|
||||
The following image shows a simplified diagram of how the Apache Kafka binder operates:
|
||||
|
||||
.Kafka Binder
|
||||
image::{github-raw}/docs/src/main/asciidoc/images/kafka-binder.png[width=300,scaledwidth="50%"]
|
||||
|
||||
The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
|
||||
The consumer group maps directly to the same Apache Kafka concept.
|
||||
Partitioning also maps directly to Apache Kafka partitions as well.
|
||||
|
||||
The binder currently uses the Apache Kafka `kafka-clients` version `2.3.1`.
|
||||
This client can communicate with older brokers (see the Kafka documentation), but certain features may not be available.
|
||||
For example, with versions earlier than 0.11.x.x, native headers are not supported.
|
||||
Also, 0.11.x.x does not support the `autoAddPartitions` property.
|
||||
|
||||
=== Configuration Options
|
||||
|
||||
This section contains the configuration options used by the Apache Kafka binder.
|
||||
|
||||
For common configuration options and properties pertaining to binder, see the <<binding-properties,core documentation>>.
|
||||
|
||||
==== Kafka Binder Properties
|
||||
|
||||
spring.cloud.stream.kafka.binder.brokers::
|
||||
A list of brokers to which the Kafka binder connects.
|
||||
+
|
||||
Default: `localhost`.
|
||||
spring.cloud.stream.kafka.binder.defaultBrokerPort::
|
||||
`brokers` allows hosts specified with or without port information (for example, `host1,host2:port2`).
|
||||
This sets the default port when no port is configured in the broker list.
|
||||
+
|
||||
Default: `9092`.
|
||||
spring.cloud.stream.kafka.binder.configuration::
|
||||
Key/Value map of client properties (both producers and consumer) passed to all clients created by the binder.
|
||||
Due to the fact that these properties are used by both producers and consumers, usage should be restricted to common properties -- for example, security settings.
|
||||
Unknown Kafka producer or consumer properties provided through this configuration are filtered out and not allowed to propagate.
|
||||
Properties here supersede any properties set in boot.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.consumerProperties::
|
||||
Key/Value map of arbitrary Kafka client consumer properties.
|
||||
In addition to supporting known Kafka consumer properties, unknown consumer properties are allowed here as well.
|
||||
Properties here supersede any properties set in boot and in the `configuration` property above.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.headers::
|
||||
The list of custom headers that are transported by the binder.
|
||||
Only required when communicating with older applications (<= 1.3.x) with a `kafka-clients` version < 0.11.0.0. Newer versions support headers natively.
|
||||
+
|
||||
Default: empty.
|
||||
spring.cloud.stream.kafka.binder.healthTimeout::
|
||||
The time to wait to get partition information, in seconds.
|
||||
Health reports as down if this timer expires.
|
||||
+
|
||||
Default: 10.
|
||||
spring.cloud.stream.kafka.binder.requiredAcks::
|
||||
The number of required acks on the broker.
|
||||
See the Kafka documentation for the producer `acks` property.
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.minPartitionCount::
|
||||
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
|
||||
The global minimum number of partitions that the binder configures on topics on which it produces or consumes data.
|
||||
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount * concurrency` settings of the producer (if either is larger).
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.producerProperties::
|
||||
Key/Value map of arbitrary Kafka client producer properties.
|
||||
In addition to supporting known Kafka producer properties, unknown producer properties are allowed here as well.
|
||||
Properties here supersede any properties set in boot and in the `configuration` property above.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.replicationFactor::
|
||||
The replication factor of auto-created topics if `autoCreateTopics` is active.
|
||||
Can be overridden on each binding.
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.autoCreateTopics::
|
||||
If set to `true`, the binder creates new topics automatically.
|
||||
If set to `false`, the binder relies on the topics being already configured.
|
||||
In the latter case, if the topics do not exist, the binder fails to start.
|
||||
+
|
||||
NOTE: This setting is independent of the `auto.create.topics.enable` setting of the broker and does not influence it.
|
||||
If the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
|
||||
+
|
||||
Default: `true`.
|
||||
spring.cloud.stream.kafka.binder.autoAddPartitions::
|
||||
If set to `true`, the binder creates new partitions if required.
|
||||
If set to `false`, the binder relies on the partition size of the topic being already configured.
|
||||
If the partition count of the target topic is smaller than the expected value, the binder fails to start.
|
||||
+
|
||||
Default: `false`.
|
||||
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
|
||||
Enables transactions in the binder. See `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
|
||||
When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
|
||||
+
|
||||
Default `null` (no transactions)
|
||||
spring.cloud.stream.kafka.binder.transaction.producer.*::
|
||||
Global producer properties for producers in a transactional binder.
|
||||
See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` and <<kafka-producer-properties>> and the general producer properties supported by all binders.
|
||||
+
|
||||
Default: See individual producer properties.
|
||||
|
||||
spring.cloud.stream.kafka.binder.headerMapperBeanName::
|
||||
The bean name of a `KafkaHeaderMapper` used for mapping `spring-messaging` headers to and from Kafka headers.
|
||||
Use this, for example, if you wish to customize the trusted packages in a `BinderHeaderMapper` bean that uses JSON deserialization for the headers.
|
||||
If this custom `BinderHeaderMapper` bean is not made available to the binder using this property, then the binder will look for a header mapper bean with the name `kafkaBinderHeaderMapper` that is of type `BinderHeaderMapper` before falling back to a default `BinderHeaderMapper` created by the binder.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
[[kafka-consumer-properties]]
|
||||
==== Kafka Consumer Properties
|
||||
|
||||
NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.consumer.<property>=<value>`.
|
||||
|
||||
|
||||
The following properties are available for Kafka consumers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.
|
||||
|
||||
admin.configuration::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replicas-assignment::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replication-factor::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.
|
||||
|
||||
autoRebalanceEnabled::
|
||||
When `true`, topic partitions are automatically rebalanced between the members of a consumer group.
|
||||
When `false`, each consumer is assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
|
||||
This requires both the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
|
||||
The value of the `spring.cloud.stream.instanceCount` property must typically be greater than 1 in this case.
|
||||
+
|
||||
Default: `true`.
|
||||
ackEachRecord::
|
||||
When `autoCommitOffset` is `true`, this setting dictates whether to commit the offset after each record is processed.
|
||||
By default, offsets are committed after all records in the batch of records returned by `consumer.poll()` have been processed.
|
||||
The number of records returned by a poll can be controlled with the `max.poll.records` Kafka property, which is set through the consumer `configuration` property.
|
||||
Setting this to `true` may cause a degradation in performance, but doing so reduces the likelihood of redelivered records when a failure occurs.
|
||||
Also, see the binder `requiredAcks` property, which also affects the performance of committing offsets.
|
||||
+
|
||||
Default: `false`.
|
||||
autoCommitOffset::
|
||||
Whether to autocommit offsets when a message has been processed.
|
||||
If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` header is present in the inbound message.
|
||||
Applications may use this header for acknowledging messages.
|
||||
See the examples section for details.
|
||||
When this property is set to `false`, Kafka binder sets the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL` and the application is responsible for acknowledging records.
|
||||
Also see `ackEachRecord`.
|
||||
+
|
||||
Default: `true`.
|
||||
autoCommitOnError::
|
||||
Effective only if `autoCommitOffset` is set to `true`.
|
||||
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only for successful messages. It allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
|
||||
If set to `true`, it always auto-commits (if auto-commit is enabled).
|
||||
If not set (the default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
|
||||
+
|
||||
Default: not set.
|
||||
resetOffsets::
|
||||
Whether to reset offsets on the consumer to the value provided by startOffset.
|
||||
Must be false if a `KafkaRebalanceListener` is provided; see <<rebalance-listener>>.
|
||||
+
|
||||
Default: `false`.
|
||||
startOffset::
|
||||
The starting offset for new groups.
|
||||
Allowed values: `earliest` and `latest`.
|
||||
If the consumer group is set explicitly for the consumer 'binding' (through `spring.cloud.stream.bindings.<channelName>.group`), 'startOffset' is set to `earliest`. Otherwise, it is set to `latest` for the `anonymous` consumer group.
|
||||
Also see `resetOffsets` (earlier in this list).
|
||||
+
|
||||
Default: null (equivalent to `earliest`).
|
||||
enableDlq::
|
||||
When set to true, it enables DLQ behavior for the consumer.
|
||||
By default, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`.
|
||||
The DLQ topic name can be configured by setting the `dlqName` property.
|
||||
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
|
||||
See <<kafka-dlq-processing>> for more information.
|
||||
Starting with version 2.0, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message`, and `x-exception-stacktrace` as `byte[]`.
|
||||
By default, a failed record is sent to the same partition number in the DLQ topic as the original record.
|
||||
See <<dlq-partition-selection>> for how to change that behavior.
|
||||
**Not allowed when `destinationIsPattern` is `true`.**
|
||||
+
|
||||
Default: `false`.
|
||||
dlqPartitions::
|
||||
When `enableDlq` is true, and this property is not set, a dead letter topic with the same number of partitions as the primary topic(s) is created.
|
||||
Usually, dead-letter records are sent to the same partition in the dead-letter topic as the original record.
|
||||
This behavior can be changed; see <<dlq-partition-selection>>.
|
||||
If this property is set to `1` and there is no `DlqPartitionFunction` bean, all dead-letter records will be written to partition `0`.
|
||||
If this property is greater than `1`, you **MUST** provide a `DlqPartitionFunction` bean.
|
||||
Note that the actual partition count is affected by the binder's `minPartitionCount` property.
|
||||
+
|
||||
Default: `none`
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka consumer properties.
|
||||
In addition to having Kafka consumer properties, other configuration properties can be passed here.
|
||||
For example, some properties needed by the application, such as `spring.cloud.stream.kafka.bindings.input.consumer.configuration.foo=bar`.
|
||||
+
|
||||
Default: Empty map.
|
||||
dlqName::
|
||||
The name of the DLQ topic to receive the error messages.
|
||||
+
|
||||
Default: null (If not specified, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`).
|
||||
dlqProducerProperties::
|
||||
Using this, DLQ-specific producer properties can be set.
|
||||
All the properties available through kafka producer properties can be set through this property.
|
||||
When native decoding is enabled on the consumer (i.e., useNativeDecoding: true), the application must provide corresponding key/value serializers for DLQ.
|
||||
This must be provided in the form of `dlqProducerProperties.configuration.key.serializer` and `dlqProducerProperties.configuration.value.serializer`.
|
||||
+
|
||||
Default: Default Kafka producer properties.
|
||||
standardHeaders::
|
||||
Indicates which standard headers are populated by the inbound channel adapter.
|
||||
Allowed values: `none`, `id`, `timestamp`, or `both`.
|
||||
Useful if using native deserialization and the first component to receive a message needs an `id` (such as an aggregator that is configured to use a JDBC message store).
|
||||
+
|
||||
Default: `none`
|
||||
converterBeanName::
|
||||
The name of a bean that implements `RecordMessageConverter`. Used in the inbound channel adapter to replace the default `MessagingMessageConverter`.
|
||||
+
|
||||
Default: `null`
|
||||
idleEventInterval::
|
||||
The interval, in milliseconds, between events indicating that no messages have recently been received.
|
||||
Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events.
|
||||
See <<pause-resume>> for a usage example.
|
||||
+
|
||||
Default: `30000`
|
||||
destinationIsPattern::
|
||||
When true, the destination is treated as a regular expression `Pattern` used to match topic names by the broker.
|
||||
When true, topics are not provisioned, and `enableDlq` is not allowed, because the binder does not know the topic names during the provisioning phase.
|
||||
Note, the time taken to detect new topics that match the pattern is controlled by the consumer property `metadata.max.age.ms`, which (at the time of writing) defaults to 300,000ms (5 minutes).
|
||||
This can be configured using the `configuration` property above.
|
||||
+
|
||||
Default: `false`
|
||||
topic.properties::
|
||||
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.input.consumer.topic.properties.message.format.version=0.9.0.0`
|
||||
+
|
||||
Default: none.
|
||||
topic.replicas-assignment::
|
||||
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
|
||||
Used when provisioning new topics.
|
||||
See the `NewTopic` Javadocs in the `kafka-clients` jar.
|
||||
+
|
||||
Default: none.
|
||||
topic.replication-factor::
|
||||
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of 1 is used).
|
||||
pollTimeout::
|
||||
Timeout used for polling in pollable consumers.
|
||||
+
|
||||
Default: 5 seconds.
|
||||
|
||||
==== Consuming Batches
|
||||
|
||||
Starting with version 3.0, when `spring.cloud.stream.binding.<name>.consumer.batch-mode` is set to `true`, all of the records received by polling the Kafka `Consumer` will be presented as a `List<?>` to the listener method.
|
||||
Otherwise, the method will be called with one record at a time.
|
||||
The size of the batch is controlled by Kafka consumer properties `max.poll.records`, `fetch.min.bytes`, `fetch.max.wait.ms`; refer to the Kafka documentation for more information.
|
||||
|
||||
IMPORTANT: Retry within the binder is not supported when using batch mode, so `maxAttempts` will be overridden to 1.
|
||||
You can configure a `SeekToCurrentBatchErrorHandler` (using a `ListenerContainerCustomizer`) to achieve similar functionality to retry in the binder.
|
||||
You can also use a manual `AckMode` and call `Acknowledgment.nack(index, sleep)` to commit the offsets for a partial batch and have the remaining records redelivered.
|
||||
Refer to the https://docs.spring.io/spring-kafka/docs/2.3.0.BUILD-SNAPSHOT/reference/html/#committing-offsets[Spring for Apache Kafka documentation] for more information about these techniques.
|
||||
|
||||
[[kafka-producer-properties]]
|
||||
==== Kafka Producer Properties
|
||||
|
||||
NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.producer.<property>=<value>`.
|
||||
|
||||
|
||||
The following properties are available for Kafka producers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.
|
||||
|
||||
admin.configuration::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replicas-assignment::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replication-factor::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.
|
||||
|
||||
bufferSize::
|
||||
Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
|
||||
+
|
||||
Default: `16384`.
|
||||
sync::
|
||||
Whether the producer is synchronous.
|
||||
+
|
||||
Default: `false`.
|
||||
sendTimeoutExpression::
|
||||
A SpEL expression evaluated against the outgoing message used to evaluate the time to wait for ack when synchronous publish is enabled -- for example, `headers['mySendTimeout']`.
|
||||
The value of the timeout is in milliseconds.
|
||||
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
|
||||
Now, the expression is evaluated before the payload is converted.
|
||||
+
|
||||
Default: `none`.
|
||||
batchTimeout::
|
||||
How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
|
||||
(Normally, the producer does not wait at all and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
|
||||
+
|
||||
Default: `0`.
|
||||
messageKeyExpression::
|
||||
A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message -- for example, `headers['myKey']`.
|
||||
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
|
||||
Now, the expression is evaluated before the payload is converted.
|
||||
+
|
||||
Default: `none`.
|
||||
headerPatterns::
|
||||
A comma-delimited list of simple patterns to match Spring messaging headers to be mapped to the Kafka `Headers` in the `ProducerRecord`.
|
||||
Patterns can begin or end with the wildcard character (asterisk).
|
||||
Patterns can be negated by prefixing with `!`.
|
||||
Matching stops after the first match (positive or negative).
|
||||
For example `!ask,as*` will pass `ash` but not `ask`.
|
||||
`id` and `timestamp` are never mapped.
|
||||
+
|
||||
Default: `*` (all headers - except the `id` and `timestamp`)
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka producer properties.
|
||||
+
|
||||
Default: Empty map.
|
||||
topic.properties::
|
||||
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.output.producer.topic.properties.message.format.version=0.9.0.0`
|
||||
+
|
||||
topic.replicas-assignment::
|
||||
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
|
||||
Used when provisioning new topics.
|
||||
See the `NewTopic` Javadocs in the `kafka-clients` jar.
|
||||
+
|
||||
Default: none.
|
||||
topic.replication-factor::
|
||||
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of 1 is used).
|
||||
useTopicHeader::
|
||||
Set to `true` to override the default binding destination (topic name) with the value of the `KafkaHeaders.TOPIC` message header in the outbound message.
|
||||
If the header is not present, the default binding destination is used.
|
||||
Default: `false`.
|
||||
+
|
||||
recordMetadataChannel::
|
||||
The bean name of a `MessageChannel` to which successful send results should be sent; the bean must exist in the application context.
|
||||
The message sent to the channel is the sent message (after conversion, if any) with an additional header `KafkaHeaders.RECORD_METADATA`.
|
||||
The header contains a `RecordMetadata` object provided by the Kafka client; it includes the partition and offset where the record was written in the topic.
|
||||
|
||||
`RecordMetadata meta = sendResultMsg.getHeaders().get(KafkaHeaders.RECORD_METADATA, RecordMetadata.class)`
|
||||
|
||||
Failed sends go to the producer error channel (if configured); see <<kafka-error-channels>>.
|
||||
Default: null
|
||||
+
|
||||
|
||||
NOTE: The Kafka binder uses the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value being used).
|
||||
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value is used.
|
||||
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), the binder fails to start.
|
||||
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions are added.
|
||||
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` or `partitionCount`), the existing partition count is used.
|
||||
|
||||
compression::
|
||||
Set the `compression.type` producer property.
|
||||
Supported values are `none`, `gzip`, `snappy` and `lz4`.
|
||||
If you override the `kafka-clients` jar to 2.1.0 (or later), as discussed in the https://docs.spring.io/spring-kafka/docs/2.2.x/reference/html/deps-for-21x.html[Spring for Apache Kafka documentation], and wish to use `zstd` compression, use `spring.cloud.stream.kafka.bindings.<binding-name>.producer.configuration.compression.type=zstd`.
|
||||
+
|
||||
Default: `none`.
|
||||
|
||||
==== Usage examples
|
||||
|
||||
In this section, we show the use of the preceding properties for specific scenarios.
|
||||
|
||||
===== Example: Setting `autoCommitOffset` to `false` and Relying on Manual Acking
|
||||
|
||||
This example illustrates how one may manually acknowledge offsets in a consumer application.
|
||||
|
||||
This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` be set to `false`.
|
||||
Use the corresponding input channel name for your example.
|
||||
|
||||
[source]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class ManuallyAcknowledgingConsumer {
|
||||
|
||||
 public static void main(String[] args) {
|
||||
     SpringApplication.run(ManuallyAcknowledgingConsumer.class, args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void process(Message<?> message) {
|
||||
Acknowledgment acknowledgment = message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
|
||||
if (acknowledgment != null) {
|
||||
System.out.println("Acknowledgment provided");
|
||||
acknowledgment.acknowledge();
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
|
||||
===== Example: Security Configuration
|
||||
|
||||
Apache Kafka 0.9 supports secure connections between client and brokers.
|
||||
To take advantage of this feature, follow the guidelines in the https://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 https://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
|
||||
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.
|
||||
|
||||
For example, to set `security.protocol` to `SASL_SSL`, set the following property:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
|
||||
----
|
||||
|
||||
All the other security properties can be set in a similar manner.
|
||||
|
||||
When using Kerberos, follow the instructions in the https://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.
|
||||
|
||||
Spring Cloud Stream supports passing JAAS configuration information to the application by using a JAAS configuration file and using Spring Boot properties.
|
||||
|
||||
====== Using JAAS Configuration Files
|
||||
|
||||
The JAAS and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
|
||||
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using a JAAS configuration file:
|
||||
|
||||
[source,bash]
|
||||
----
|
||||
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
|
||||
--spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
|
||||
--spring.cloud.stream.bindings.input.destination=stream.ticktock \
|
||||
--spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
|
||||
----
|
||||
|
||||
====== Using Spring Boot Properties
|
||||
|
||||
As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications by using Spring Boot properties.
|
||||
|
||||
The following properties can be used to configure the login context of the Kafka client:
|
||||
|
||||
spring.cloud.stream.kafka.binder.jaas.loginModule::
|
||||
The login module name. Not necessary to be set in normal cases.
|
||||
+
|
||||
Default: `com.sun.security.auth.module.Krb5LoginModule`.
|
||||
spring.cloud.stream.kafka.binder.jaas.controlFlag::
|
||||
The control flag of the login module.
|
||||
+
|
||||
Default: `required`.
|
||||
spring.cloud.stream.kafka.binder.jaas.options::
|
||||
Map with a key/value pair containing the login module options.
|
||||
+
|
||||
Default: Empty map.
|
||||
|
||||
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using Spring Boot configuration properties:
|
||||
|
||||
[source,bash]
|
||||
----
|
||||
java --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
|
||||
--spring.cloud.stream.bindings.input.destination=stream.ticktock \
|
||||
--spring.cloud.stream.kafka.binder.autoCreateTopics=false \
|
||||
--spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.storeKey=true \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
|
||||
----
|
||||
|
||||
The preceding example represents the equivalent of the following JAAS file:
|
||||
|
||||
[source]
|
||||
----
|
||||
KafkaClient {
|
||||
com.sun.security.auth.module.Krb5LoginModule required
|
||||
useKeyTab=true
|
||||
storeKey=true
|
||||
keyTab="/etc/security/keytabs/kafka_client.keytab"
|
||||
principal="kafka-client-1@EXAMPLE.COM";
|
||||
};
|
||||
----
|
||||
|
||||
If the topics required already exist on the broker or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be sent.
|
||||
|
||||
NOTE: Do not mix JAAS configuration files and Spring Boot properties in the same application.
|
||||
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream ignores the Spring Boot properties.
|
||||
|
||||
NOTE: Be careful when using the `autoCreateTopics` and `autoAddPartitions` with Kerberos.
|
||||
Usually, applications may use principals that do not have administrative rights in Kafka and Zookeeper.
|
||||
Consequently, relying on Spring Cloud Stream to create/modify topics may fail.
|
||||
In secure environments, we strongly recommend creating topics and managing ACLs administratively by using Kafka tooling.
|
||||
|
||||
[[pause-resume]]
|
||||
===== Example: Pausing and Resuming the Consumer
|
||||
|
||||
If you wish to suspend consumption but not cause a partition rebalance, you can pause and resume the consumer.
|
||||
This is facilitated by adding the `Consumer` as a parameter to your `@StreamListener`.
|
||||
To resume, you need an `ApplicationListener` for `ListenerContainerIdleEvent` instances.
|
||||
The frequency at which events are published is controlled by the `idleEventInterval` property.
|
||||
Since the consumer is not thread-safe, you must call these methods on the calling thread.
|
||||
|
||||
The following simple application shows how to pause and resume:
|
||||
|
||||
[source, java]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class Application {
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(Application.class, args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void in(String in, @Header(KafkaHeaders.CONSUMER) Consumer<?, ?> consumer) {
|
||||
System.out.println(in);
|
||||
consumer.pause(Collections.singleton(new TopicPartition("myTopic", 0)));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ApplicationListener<ListenerContainerIdleEvent> idleListener() {
|
||||
return event -> {
|
||||
System.out.println(event);
|
||||
if (event.getConsumer().paused().size() > 0) {
|
||||
event.getConsumer().resume(event.getConsumer().paused());
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
|
||||
[[kafka-transactional-binder]]
|
||||
=== Transactional Binder
|
||||
|
||||
Enable transactions by setting `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` to a non-empty value, e.g. `tx-`.
|
||||
When used in a processor application, the consumer starts the transaction; any records sent on the consumer thread participate in the same transaction.
|
||||
When the listener exits normally, the listener container will send the offset to the transaction and commit it.
|
||||
A common producer factory is used for all producer bindings configured using `spring.cloud.stream.kafka.binder.transaction.producer.*` properties; individual binding Kafka producer properties are ignored.
|
||||
|
||||
If you wish to use transactions in a source application, or from some arbitrary thread for producer-only transaction (e.g. `@Scheduled` method), you must get a reference to the transactional producer factory and define a `KafkaTransactionManager` bean using it.
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@Bean
|
||||
public PlatformTransactionManager transactionManager(BinderFactory binders) {
|
||||
ProducerFactory<byte[], byte[]> pf = ((KafkaMessageChannelBinder) binders.getBinder(null,
|
||||
MessageChannel.class)).getTransactionalProducerFactory();
|
||||
return new KafkaTransactionManager<>(pf);
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
Notice that we get a reference to the binder using the `BinderFactory`; use `null` in the first argument when there is only one binder configured.
|
||||
If more than one binder is configured, use the binder name to get the reference.
|
||||
Once we have a reference to the binder, we can obtain a reference to the `ProducerFactory` and create a transaction manager.
|
||||
|
||||
Then you would use normal Spring transaction support, e.g. `TransactionTemplate` or `@Transactional`, for example:
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
public static class Sender {
|
||||
|
||||
@Transactional
|
||||
public void doInTransaction(MessageChannel output, List<String> stuffToSend) {
|
||||
stuffToSend.forEach(stuff -> output.send(new GenericMessage<>(stuff)));
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
If you wish to synchronize producer-only transactions with those from some other transaction manager, use a `ChainedTransactionManager`.
|
||||
|
||||
[[kafka-error-channels]]
|
||||
=== Error Channels
|
||||
|
||||
Starting with version 1.3, the binder unconditionally sends exceptions to an error channel for each consumer destination and can also be configured to send async producer send failures to an error channel.
|
||||
See <<spring-cloud-stream-overview-error-handling>> for more information.
|
||||
|
||||
The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with properties:
|
||||
|
||||
* `failedMessage`: The Spring Messaging `Message<?>` that failed to be sent.
|
||||
* `record`: The raw `ProducerRecord` that was created from the `failedMessage`
|
||||
|
||||
There is no automatic handling of producer exceptions (such as sending to a <<kafka-dlq-processing, Dead-Letter queue>>).
|
||||
You can consume these exceptions with your own Spring Integration flow.
|
||||
|
||||
[[kafka-metrics]]
|
||||
=== Kafka Metrics
|
||||
|
||||
Kafka binder module exposes the following metrics:
|
||||
|
||||
`spring.cloud.stream.binder.kafka.offset`: This metric indicates how many messages have not yet been consumed from a given binder's topic by a given consumer group.
|
||||
The metrics provided are based on the Mircometer metrics library. The metric contains the consumer group information, topic and the actual lag in committed offset from the latest offset on the topic.
|
||||
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform.
|
||||
|
||||
[[kafka-tombstones]]
|
||||
=== Tombstone Records (null record values)
|
||||
|
||||
When using compacted topics, a record with a `null` value (also called a tombstone record) represents the deletion of a key.
|
||||
To receive such messages in a `@StreamListener` method, the parameter must be marked as not required to receive a `null` value argument.
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void in(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) byte[] key,
|
||||
@Payload(required = false) Customer customer) {
|
||||
// customer is null if a tombstone record
|
||||
...
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
[[rebalance-listener]]
|
||||
=== Using a KafkaRebalanceListener
|
||||
|
||||
Applications may wish to seek topics/partitions to arbitrary offsets when the partitions are initially assigned, or perform other operations on the consumer.
|
||||
Starting with version 2.1, if you provide a single `KafkaRebalanceListener` bean in the application context, it will be wired into all Kafka consumer bindings.
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
public interface KafkaBindingRebalanceListener {
|
||||
|
||||
/**
|
||||
* Invoked by the container before any pending offsets are committed.
|
||||
* @param bindingName the name of the binding.
|
||||
* @param consumer the consumer.
|
||||
* @param partitions the partitions.
|
||||
*/
|
||||
default void onPartitionsRevokedBeforeCommit(String bindingName, Consumer<?, ?> consumer,
|
||||
Collection<TopicPartition> partitions) {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Invoked by the container after any pending offsets are committed.
|
||||
* @param bindingName the name of the binding.
|
||||
* @param consumer the consumer.
|
||||
* @param partitions the partitions.
|
||||
*/
|
||||
default void onPartitionsRevokedAfterCommit(String bindingName, Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Invoked when partitions are initially assigned or after a rebalance.
|
||||
* Applications might only want to perform seek operations on an initial assignment.
|
||||
* @param bindingName the name of the binding.
|
||||
* @param consumer the consumer.
|
||||
* @param partitions the partitions.
|
||||
* @param initial true if this is the initial assignment.
|
||||
*/
|
||||
default void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer, Collection<TopicPartition> partitions,
|
||||
boolean initial) {
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
You cannot set the `resetOffsets` consumer property to `true` when you provide a rebalance listener.
|
||||
@@ -1,103 +0,0 @@
|
||||
=== Partitioning with the Kafka Binder
|
||||
|
||||
Apache Kafka supports topic partitioning natively.
|
||||
|
||||
Sometimes it is advantageous to send data to specific partitions -- for example, when you want to strictly order message processing (all messages for a particular customer should go to the same partition).
|
||||
|
||||
The following example shows how to configure the producer and consumer side:
|
||||
|
||||
[source, java]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Source.class)
|
||||
public class KafkaPartitionProducerApplication {
|
||||
|
||||
private static final Random RANDOM = new Random(System.currentTimeMillis());
|
||||
|
||||
private static final String[] data = new String[] {
|
||||
"foo1", "bar1", "qux1",
|
||||
"foo2", "bar2", "qux2",
|
||||
"foo3", "bar3", "qux3",
|
||||
"foo4", "bar4", "qux4",
|
||||
};
|
||||
|
||||
public static void main(String[] args) {
|
||||
new SpringApplicationBuilder(KafkaPartitionProducerApplication.class)
|
||||
.web(false)
|
||||
.run(args);
|
||||
}
|
||||
|
||||
@InboundChannelAdapter(channel = Source.OUTPUT, poller = @Poller(fixedRate = "5000"))
|
||||
public Message<?> generate() {
|
||||
String value = data[RANDOM.nextInt(data.length)];
|
||||
System.out.println("Sending: " + value);
|
||||
return MessageBuilder.withPayload(value)
|
||||
.setHeader("partitionKey", value)
|
||||
.build();
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
|
||||
.application.yml
|
||||
[source, yaml]
|
||||
----
|
||||
spring:
|
||||
cloud:
|
||||
stream:
|
||||
bindings:
|
||||
output:
|
||||
destination: partitioned.topic
|
||||
producer:
|
||||
partition-key-expression: headers['partitionKey']
|
||||
partition-count: 12
|
||||
----
|
||||
|
||||
IMPORTANT: The topic must be provisioned to have enough partitions to achieve the desired concurrency for all consumer groups.
|
||||
The above configuration supports up to 12 consumer instances (6 if their `concurrency` is 2, 4 if their concurrency is 3, and so on).
|
||||
It is generally best to "`over-provision`" the partitions to allow for future increases in consumers or concurrency.
|
||||
|
||||
NOTE: The preceding configuration uses the default partitioning (`key.hashCode() % partitionCount`).
|
||||
This may or may not provide a suitably balanced algorithm, depending on the key values.
|
||||
You can override this default by using the `partitionSelectorExpression` or `partitionSelectorClass` properties.
|
||||
|
||||
Since partitions are natively handled by Kafka, no special configuration is needed on the consumer side.
|
||||
Kafka allocates partitions across the instances.
|
||||
|
||||
The following Spring Boot application listens to a Kafka stream and prints (to the console) the partition ID to which each message goes:
|
||||
|
||||
[source, java]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class KafkaPartitionConsumerApplication {
|
||||
|
||||
public static void main(String[] args) {
|
||||
new SpringApplicationBuilder(KafkaPartitionConsumerApplication.class)
|
||||
.web(false)
|
||||
.run(args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void listen(@Payload String in, @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition) {
|
||||
System.out.println(in + " received from partition " + partition);
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
|
||||
.application.yml
|
||||
[source, yaml]
|
||||
----
|
||||
spring:
|
||||
cloud:
|
||||
stream:
|
||||
bindings:
|
||||
input:
|
||||
destination: partitioned.topic
|
||||
group: myGroup
|
||||
----
|
||||
|
||||
You can add instances as needed.
|
||||
Kafka rebalances the partition allocations.
|
||||
If the instance count (or `instance count * concurrency`) exceeds the number of partitions, some consumers are idle.
|
||||
@@ -1,3 +0,0 @@
|
||||
include::overview.adoc[leveloffset=+1]
|
||||
include::dlq.adoc[leveloffset=+1]
|
||||
include::partitions.adoc[leveloffset=+1]
|
||||
@@ -1,53 +0,0 @@
|
||||
:github-tag: master
|
||||
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
|
||||
:github-raw: https://raw.githubusercontent.com/{github-repo}/{github-tag}
|
||||
:github-code: https://github.com/{github-repo}/tree/{github-tag}
|
||||
:toc: left
|
||||
:toclevels: 8
|
||||
:nofooter:
|
||||
:sectlinks: true
|
||||
|
||||
|
||||
[[spring-cloud-stream-binder-kafka-reference]]
|
||||
= Spring Cloud Stream Kafka Binder Reference Guide
|
||||
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek, Gary Russell, Arnaud Jardiné, Soby Chacko
|
||||
:doctype: book
|
||||
:toc:
|
||||
:toclevels: 4
|
||||
:source-highlighter: prettify
|
||||
:numbered:
|
||||
:icons: font
|
||||
:hide-uri-scheme:
|
||||
:spring-cloud-stream-binder-kafka-repo: snapshot
|
||||
:github-tag: master
|
||||
:spring-cloud-stream-binder-kafka-docs-version: current
|
||||
:spring-cloud-stream-binder-kafka-docs: https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/{spring-cloud-stream-binder-kafka-docs-version}/reference
|
||||
:spring-cloud-stream-binder-kafka-docs-current: https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/current-SNAPSHOT/reference/html/
|
||||
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
|
||||
:github-raw: https://raw.github.com/{github-repo}/{github-tag}
|
||||
:github-code: https://github.com/{github-repo}/tree/{github-tag}
|
||||
:github-wiki: https://github.com/{github-repo}/wiki
|
||||
:github-master-code: https://github.com/{github-repo}/tree/master
|
||||
:sc-ext: java
|
||||
// ======================================================================================
|
||||
|
||||
|
||||
*{spring-cloud-stream-version}*
|
||||
|
||||
|
||||
= Reference Guide
|
||||
include::overview.adoc[]
|
||||
|
||||
include::dlq.adoc[]
|
||||
|
||||
include::partitions.adoc[]
|
||||
|
||||
include::kafka-streams.adoc[]
|
||||
|
||||
= Appendices
|
||||
[appendix]
|
||||
include::building.adoc[]
|
||||
|
||||
include::contributing.adoc[]
|
||||
|
||||
// ======================================================================================
|
||||
@@ -1,37 +0,0 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
base_dir = File.join(File.dirname(__FILE__),'../../..')
|
||||
src_dir = File.join(base_dir, "/src/main/asciidoc")
|
||||
require 'asciidoctor'
|
||||
require 'optparse'
|
||||
|
||||
options = {}
|
||||
file = "#{src_dir}/README.adoc"
|
||||
|
||||
OptionParser.new do |o|
|
||||
o.on('-o OUTPUT_FILE', 'Output file (default is stdout)') { |file| options[:to_file] = file unless file=='-' }
|
||||
o.on('-h', '--help') { puts o; exit }
|
||||
o.parse!
|
||||
end
|
||||
|
||||
file = ARGV[0] if ARGV.length>0
|
||||
|
||||
# Copied from https://github.com/asciidoctor/asciidoctor-extensions-lab/blob/master/scripts/asciidoc-coalescer.rb
|
||||
doc = Asciidoctor.load_file file, safe: :unsafe, header_only: true, attributes: options[:attributes]
|
||||
header_attr_names = (doc.instance_variable_get :@attributes_modified).to_a
|
||||
header_attr_names.each {|k| doc.attributes[%(#{k}!)] = '' unless doc.attr? k }
|
||||
attrs = doc.attributes
|
||||
attrs['allow-uri-read'] = true
|
||||
puts attrs
|
||||
|
||||
out = "// Do not edit this file (e.g. go instead to src/main/asciidoc)\n\n"
|
||||
doc = Asciidoctor.load_file file, safe: :unsafe, parse: false, attributes: attrs
|
||||
out << doc.reader.read
|
||||
|
||||
unless options[:to_file]
|
||||
puts out
|
||||
else
|
||||
File.open(options[:to_file],'w+') do |file|
|
||||
file.write(out)
|
||||
end
|
||||
end
|
||||
158
mvnw
vendored
158
mvnw
vendored
@@ -54,16 +54,38 @@ case "`uname`" in
|
||||
CYGWIN*) cygwin=true ;;
|
||||
MINGW*) mingw=true;;
|
||||
Darwin*) darwin=true
|
||||
# Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
|
||||
# See https://developer.apple.com/library/mac/qa/qa1170/_index.html
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
if [ -x "/usr/libexec/java_home" ]; then
|
||||
export JAVA_HOME="`/usr/libexec/java_home`"
|
||||
else
|
||||
export JAVA_HOME="/Library/Java/Home"
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
#
|
||||
# Look for the Apple JDKs first to preserve the existing behaviour, and then look
|
||||
# for the new JDKs provided by Oracle.
|
||||
#
|
||||
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
|
||||
#
|
||||
# Apple JDKs
|
||||
#
|
||||
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
|
||||
#
|
||||
# Apple JDKs
|
||||
#
|
||||
export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
|
||||
#
|
||||
# Oracle JDKs
|
||||
#
|
||||
export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
|
||||
#
|
||||
# Apple JDKs
|
||||
#
|
||||
export JAVA_HOME=`/usr/libexec/java_home`
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -z "$JAVA_HOME" ] ; then
|
||||
@@ -108,7 +130,7 @@ if $cygwin ; then
|
||||
CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
|
||||
fi
|
||||
|
||||
# For Mingw, ensure paths are in UNIX format before anything is touched
|
||||
# For Migwn, ensure paths are in UNIX format before anything is touched
|
||||
if $mingw ; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME="`(cd "$M2_HOME"; pwd)`"
|
||||
@@ -162,28 +184,27 @@ fi
|
||||
|
||||
CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
|
||||
|
||||
# For Cygwin, switch paths to Windows format before running java
|
||||
if $cygwin; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME=`cygpath --path --windows "$M2_HOME"`
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
|
||||
[ -n "$CLASSPATH" ] &&
|
||||
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
|
||||
fi
|
||||
|
||||
# traverses directory structure from process work directory to filesystem root
|
||||
# first directory with .mvn subdirectory is considered project base directory
|
||||
find_maven_basedir() {
|
||||
|
||||
if [ -z "$1" ]
|
||||
then
|
||||
echo "Path not specified to find_maven_basedir"
|
||||
return 1
|
||||
fi
|
||||
|
||||
basedir="$1"
|
||||
wdir="$1"
|
||||
local basedir=$(pwd)
|
||||
local wdir=$(pwd)
|
||||
while [ "$wdir" != '/' ] ; do
|
||||
if [ -d "$wdir"/.mvn ] ; then
|
||||
basedir=$wdir
|
||||
break
|
||||
fi
|
||||
# workaround for JBEAP-8937 (on Solaris 10/Sparc)
|
||||
if [ -d "${wdir}" ]; then
|
||||
wdir=`cd "$wdir/.."; pwd`
|
||||
fi
|
||||
# end of workaround
|
||||
wdir=$(cd "$wdir/.."; pwd)
|
||||
done
|
||||
echo "${basedir}"
|
||||
}
|
||||
@@ -195,87 +216,13 @@ concat_lines() {
|
||||
fi
|
||||
}
|
||||
|
||||
BASE_DIR=`find_maven_basedir "$(pwd)"`
|
||||
if [ -z "$BASE_DIR" ]; then
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
##########################################################################################
|
||||
# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
|
||||
# This allows using the maven wrapper in projects that prohibit checking in binary data.
|
||||
##########################################################################################
|
||||
if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Found .mvn/wrapper/maven-wrapper.jar"
|
||||
fi
|
||||
else
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
|
||||
fi
|
||||
jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
|
||||
while IFS="=" read key value; do
|
||||
case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
|
||||
esac
|
||||
done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Downloading from: $jarUrl"
|
||||
fi
|
||||
wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
|
||||
|
||||
if command -v wget > /dev/null; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Found wget ... using wget"
|
||||
fi
|
||||
wget "$jarUrl" -O "$wrapperJarPath"
|
||||
elif command -v curl > /dev/null; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Found curl ... using curl"
|
||||
fi
|
||||
curl -o "$wrapperJarPath" "$jarUrl"
|
||||
else
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Falling back to using Java to download"
|
||||
fi
|
||||
javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
|
||||
if [ -e "$javaClass" ]; then
|
||||
if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo " - Compiling MavenWrapperDownloader.java ..."
|
||||
fi
|
||||
# Compiling the Java class
|
||||
("$JAVA_HOME/bin/javac" "$javaClass")
|
||||
fi
|
||||
if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
|
||||
# Running the downloader
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo " - Running MavenWrapperDownloader.java ..."
|
||||
fi
|
||||
("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
##########################################################################################
|
||||
# End of extension
|
||||
##########################################################################################
|
||||
|
||||
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo $MAVEN_PROJECTBASEDIR
|
||||
fi
|
||||
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
|
||||
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
|
||||
|
||||
# For Cygwin, switch paths to Windows format before running java
|
||||
if $cygwin; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME=`cygpath --path --windows "$M2_HOME"`
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
|
||||
[ -n "$CLASSPATH" ] &&
|
||||
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
|
||||
[ -n "$MAVEN_PROJECTBASEDIR" ] &&
|
||||
MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
|
||||
fi
|
||||
# Provide a "standardized" way to retrieve the CLI args that will
|
||||
# work with both Windows and non-Windows executions.
|
||||
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
|
||||
export MAVEN_CMD_LINE_ARGS
|
||||
|
||||
WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||
|
||||
@@ -283,4 +230,5 @@ exec "$JAVACMD" \
|
||||
$MAVEN_OPTS \
|
||||
-classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
|
||||
"-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
|
||||
${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
|
||||
${WRAPPER_LAUNCHER} "$@"
|
||||
|
||||
|
||||
395
mvnw.cmd
vendored
Executable file → Normal file
395
mvnw.cmd
vendored
Executable file → Normal file
@@ -1,161 +1,234 @@
|
||||
@REM ----------------------------------------------------------------------------
|
||||
@REM Licensed to the Apache Software Foundation (ASF) under one
|
||||
@REM or more contributor license agreements. See the NOTICE file
|
||||
@REM distributed with this work for additional information
|
||||
@REM regarding copyright ownership. The ASF licenses this file
|
||||
@REM to you under the Apache License, Version 2.0 (the
|
||||
@REM "License"); you may not use this file except in compliance
|
||||
@REM with the License. You may obtain a copy of the License at
|
||||
@REM
|
||||
@REM https://www.apache.org/licenses/LICENSE-2.0
|
||||
@REM
|
||||
@REM Unless required by applicable law or agreed to in writing,
|
||||
@REM software distributed under the License is distributed on an
|
||||
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
@REM KIND, either express or implied. See the License for the
|
||||
@REM specific language governing permissions and limitations
|
||||
@REM under the License.
|
||||
@REM ----------------------------------------------------------------------------
|
||||
|
||||
@REM ----------------------------------------------------------------------------
|
||||
@REM Maven2 Start Up Batch script
|
||||
@REM
|
||||
@REM Required ENV vars:
|
||||
@REM JAVA_HOME - location of a JDK home dir
|
||||
@REM
|
||||
@REM Optional ENV vars
|
||||
@REM M2_HOME - location of maven2's installed home dir
|
||||
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
|
||||
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
|
||||
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
|
||||
@REM e.g. to debug Maven itself, use
|
||||
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
|
||||
@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
|
||||
@REM ----------------------------------------------------------------------------
|
||||
|
||||
@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
|
||||
@echo off
|
||||
@REM set title of command window
|
||||
title %0
|
||||
@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on'
|
||||
@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
|
||||
|
||||
@REM set %HOME% to equivalent of $HOME
|
||||
if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
|
||||
|
||||
@REM Execute a user defined script before this one
|
||||
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
|
||||
@REM check for pre script, once with legacy .bat ending and once with .cmd ending
|
||||
if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
|
||||
if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
|
||||
:skipRcPre
|
||||
|
||||
@setlocal
|
||||
|
||||
set ERROR_CODE=0
|
||||
|
||||
@REM To isolate internal variables from possible post scripts, we use another setlocal
|
||||
@setlocal
|
||||
|
||||
@REM ==== START VALIDATION ====
|
||||
if not "%JAVA_HOME%" == "" goto OkJHome
|
||||
|
||||
echo.
|
||||
echo Error: JAVA_HOME not found in your environment. >&2
|
||||
echo Please set the JAVA_HOME variable in your environment to match the >&2
|
||||
echo location of your Java installation. >&2
|
||||
echo.
|
||||
goto error
|
||||
|
||||
:OkJHome
|
||||
if exist "%JAVA_HOME%\bin\java.exe" goto init
|
||||
|
||||
echo.
|
||||
echo Error: JAVA_HOME is set to an invalid directory. >&2
|
||||
echo JAVA_HOME = "%JAVA_HOME%" >&2
|
||||
echo Please set the JAVA_HOME variable in your environment to match the >&2
|
||||
echo location of your Java installation. >&2
|
||||
echo.
|
||||
goto error
|
||||
|
||||
@REM ==== END VALIDATION ====
|
||||
|
||||
:init
|
||||
|
||||
@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
|
||||
@REM Fallback to current working directory if not found.
|
||||
|
||||
set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
|
||||
IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
|
||||
|
||||
set EXEC_DIR=%CD%
|
||||
set WDIR=%EXEC_DIR%
|
||||
:findBaseDir
|
||||
IF EXIST "%WDIR%"\.mvn goto baseDirFound
|
||||
cd ..
|
||||
IF "%WDIR%"=="%CD%" goto baseDirNotFound
|
||||
set WDIR=%CD%
|
||||
goto findBaseDir
|
||||
|
||||
:baseDirFound
|
||||
set MAVEN_PROJECTBASEDIR=%WDIR%
|
||||
cd "%EXEC_DIR%"
|
||||
goto endDetectBaseDir
|
||||
|
||||
:baseDirNotFound
|
||||
set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
|
||||
cd "%EXEC_DIR%"
|
||||
|
||||
:endDetectBaseDir
|
||||
|
||||
IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
|
||||
|
||||
@setlocal EnableExtensions EnableDelayedExpansion
|
||||
for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
|
||||
@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
|
||||
|
||||
:endReadAdditionalConfig
|
||||
|
||||
SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
|
||||
set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
|
||||
set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||
|
||||
set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
|
||||
FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO (
|
||||
IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
|
||||
)
|
||||
|
||||
@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
|
||||
@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
|
||||
if exist %WRAPPER_JAR% (
|
||||
echo Found %WRAPPER_JAR%
|
||||
) else (
|
||||
echo Couldn't find %WRAPPER_JAR%, downloading it ...
|
||||
echo Downloading from: %DOWNLOAD_URL%
|
||||
powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"
|
||||
echo Finished downloading %WRAPPER_JAR%
|
||||
)
|
||||
@REM End of extension
|
||||
|
||||
%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
|
||||
if ERRORLEVEL 1 goto error
|
||||
goto end
|
||||
|
||||
:error
|
||||
set ERROR_CODE=1
|
||||
|
||||
:end
|
||||
@endlocal & set ERROR_CODE=%ERROR_CODE%
|
||||
|
||||
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
|
||||
@REM check for post script, once with legacy .bat ending and once with .cmd ending
|
||||
if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
|
||||
if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
|
||||
:skipRcPost
|
||||
|
||||
@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
|
||||
if "%MAVEN_BATCH_PAUSE%" == "on" pause
|
||||
|
||||
if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
|
||||
|
||||
exit /B %ERROR_CODE%
|
||||
#!/bin/sh
|
||||
# ----------------------------------------------------------------------------
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Maven2 Start Up Batch script
|
||||
#
|
||||
# Required ENV vars:
|
||||
# ------------------
|
||||
# JAVA_HOME - location of a JDK home dir
|
||||
#
|
||||
# Optional ENV vars
|
||||
# -----------------
|
||||
# M2_HOME - location of maven2's installed home dir
|
||||
# MAVEN_OPTS - parameters passed to the Java VM when running Maven
|
||||
# e.g. to debug Maven itself, use
|
||||
# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
|
||||
# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
if [ -z "$MAVEN_SKIP_RC" ] ; then
|
||||
|
||||
if [ -f /etc/mavenrc ] ; then
|
||||
. /etc/mavenrc
|
||||
fi
|
||||
|
||||
if [ -f "$HOME/.mavenrc" ] ; then
|
||||
. "$HOME/.mavenrc"
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
# OS specific support. $var _must_ be set to either true or false.
|
||||
cygwin=false;
|
||||
darwin=false;
|
||||
mingw=false
|
||||
case "`uname`" in
|
||||
CYGWIN*) cygwin=true ;;
|
||||
MINGW*) mingw=true;;
|
||||
Darwin*) darwin=true
|
||||
#
|
||||
# Look for the Apple JDKs first to preserve the existing behaviour, and then look
|
||||
# for the new JDKs provided by Oracle.
|
||||
#
|
||||
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
|
||||
#
|
||||
# Apple JDKs
|
||||
#
|
||||
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
|
||||
#
|
||||
# Apple JDKs
|
||||
#
|
||||
export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
|
||||
#
|
||||
# Oracle JDKs
|
||||
#
|
||||
export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
|
||||
#
|
||||
# Apple JDKs
|
||||
#
|
||||
export JAVA_HOME=`/usr/libexec/java_home`
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -z "$JAVA_HOME" ] ; then
|
||||
if [ -r /etc/gentoo-release ] ; then
|
||||
JAVA_HOME=`java-config --jre-home`
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$M2_HOME" ] ; then
|
||||
## resolve links - $0 may be a link to maven's home
|
||||
PRG="$0"
|
||||
|
||||
# need this for relative symlinks
|
||||
while [ -h "$PRG" ] ; do
|
||||
ls=`ls -ld "$PRG"`
|
||||
link=`expr "$ls" : '.*-> \(.*\)$'`
|
||||
if expr "$link" : '/.*' > /dev/null; then
|
||||
PRG="$link"
|
||||
else
|
||||
PRG="`dirname "$PRG"`/$link"
|
||||
fi
|
||||
done
|
||||
|
||||
saveddir=`pwd`
|
||||
|
||||
M2_HOME=`dirname "$PRG"`/..
|
||||
|
||||
# make it fully qualified
|
||||
M2_HOME=`cd "$M2_HOME" && pwd`
|
||||
|
||||
cd "$saveddir"
|
||||
# echo Using m2 at $M2_HOME
|
||||
fi
|
||||
|
||||
# For Cygwin, ensure paths are in UNIX format before anything is touched
|
||||
if $cygwin ; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME=`cygpath --unix "$M2_HOME"`
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
|
||||
[ -n "$CLASSPATH" ] &&
|
||||
CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
|
||||
fi
|
||||
|
||||
# For Migwn, ensure paths are in UNIX format before anything is touched
|
||||
if $mingw ; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME="`(cd "$M2_HOME"; pwd)`"
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
|
||||
# TODO classpath?
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
javaExecutable="`which javac`"
|
||||
if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
|
||||
# readlink(1) is not available as standard on Solaris 10.
|
||||
readLink=`which readlink`
|
||||
if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
|
||||
if $darwin ; then
|
||||
javaHome="`dirname \"$javaExecutable\"`"
|
||||
javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
|
||||
else
|
||||
javaExecutable="`readlink -f \"$javaExecutable\"`"
|
||||
fi
|
||||
javaHome="`dirname \"$javaExecutable\"`"
|
||||
javaHome=`expr "$javaHome" : '\(.*\)/bin'`
|
||||
JAVA_HOME="$javaHome"
|
||||
export JAVA_HOME
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$JAVACMD" ] ; then
|
||||
if [ -n "$JAVA_HOME" ] ; then
|
||||
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
|
||||
# IBM's JDK on AIX uses strange locations for the executables
|
||||
JAVACMD="$JAVA_HOME/jre/sh/java"
|
||||
else
|
||||
JAVACMD="$JAVA_HOME/bin/java"
|
||||
fi
|
||||
else
|
||||
JAVACMD="`which java`"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -x "$JAVACMD" ] ; then
|
||||
echo "Error: JAVA_HOME is not defined correctly." >&2
|
||||
echo " We cannot execute $JAVACMD" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ] ; then
|
||||
echo "Warning: JAVA_HOME environment variable is not set."
|
||||
fi
|
||||
|
||||
CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
|
||||
|
||||
# For Cygwin, switch paths to Windows format before running java
|
||||
if $cygwin; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME=`cygpath --path --windows "$M2_HOME"`
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
|
||||
[ -n "$CLASSPATH" ] &&
|
||||
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
|
||||
fi
|
||||
|
||||
# traverses directory structure from process work directory to filesystem root
|
||||
# first directory with .mvn subdirectory is considered project base directory
|
||||
find_maven_basedir() {
|
||||
local basedir=$(pwd)
|
||||
local wdir=$(pwd)
|
||||
while [ "$wdir" != '/' ] ; do
|
||||
if [ -d "$wdir"/.mvn ] ; then
|
||||
basedir=$wdir
|
||||
break
|
||||
fi
|
||||
wdir=$(cd "$wdir/.."; pwd)
|
||||
done
|
||||
echo "${basedir}"
|
||||
}
|
||||
|
||||
# concatenates all lines of a file
|
||||
concat_lines() {
|
||||
if [ -f "$1" ]; then
|
||||
echo "$(tr -s '\n' ' ' < "$1")"
|
||||
fi
|
||||
}
|
||||
|
||||
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
|
||||
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
|
||||
|
||||
# Provide a "standardized" way to retrieve the CLI args that will
|
||||
# work with both Windows and non-Windows executions.
|
||||
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
|
||||
export MAVEN_CMD_LINE_ARGS
|
||||
|
||||
WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||
|
||||
exec "$JAVACMD" \
|
||||
$MAVEN_OPTS \
|
||||
-classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
|
||||
"-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
|
||||
${WRAPPER_LAUNCHER} "$@"
|
||||
|
||||
|
||||
163
pom.xml
163
pom.xml
@@ -2,161 +2,36 @@
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>3.0.0.RC2</version>
|
||||
<version>1.1.0.BUILD-SNAPSHOT</version>
|
||||
<packaging>pom</packaging>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-build</artifactId>
|
||||
<version>2.2.0.RC2</version>
|
||||
<version>1.1.1.RELEASE</version>
|
||||
<relativePath />
|
||||
</parent>
|
||||
<properties>
|
||||
<java.version>1.8</java.version>
|
||||
<spring-kafka.version>2.3.2.RELEASE</spring-kafka.version>
|
||||
<spring-integration-kafka.version>3.2.1.RELEASE</spring-integration-kafka.version>
|
||||
<kafka.version>2.3.1</kafka.version>
|
||||
<spring-cloud-schema-registry.version>1.0.0.RC2</spring-cloud-schema-registry.version>
|
||||
<spring-cloud-stream.version>3.0.0.RC2</spring-cloud-stream.version>
|
||||
<maven-checkstyle-plugin.failsOnError>true</maven-checkstyle-plugin.failsOnError>
|
||||
<maven-checkstyle-plugin.failsOnViolation>true</maven-checkstyle-plugin.failsOnViolation>
|
||||
<maven-checkstyle-plugin.includeTestSourceDirectory>true</maven-checkstyle-plugin.includeTestSourceDirectory>
|
||||
<spring-boot.version>1.4.0.BUILD-SNAPSHOT</spring-boot.version>
|
||||
</properties>
|
||||
<modules>
|
||||
<module>spring-cloud-stream-binder-kafka</module>
|
||||
<module>spring-cloud-starter-stream-kafka</module>
|
||||
<module>spring-cloud-stream-binder-kafka-core</module>
|
||||
<module>spring-cloud-stream-binder-kafka-streams</module>
|
||||
<module>docs</module>
|
||||
</modules>
|
||||
|
||||
<dependencyManagement>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream</artifactId>
|
||||
<version>${spring-cloud-stream.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
<classifier>test</classifier>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka</artifactId>
|
||||
<version>${spring-kafka.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.integration</groupId>
|
||||
<artifactId>spring-integration-kafka</artifactId>
|
||||
<version>${spring-integration-kafka.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-test</artifactId>
|
||||
<version>${spring-cloud-stream.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka-test</artifactId>
|
||||
<scope>test</scope>
|
||||
<version>${spring-kafka.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-streams</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.slf4j</groupId>
|
||||
<artifactId>slf4j-log4j12</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.11</artifactId>
|
||||
<classifier>test</classifier>
|
||||
<scope>test</scope>
|
||||
<version>${kafka.version}</version>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>jline</groupId>
|
||||
<artifactId>jline</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.slf4j</groupId>
|
||||
<artifactId>slf4j-log4j12</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>log4j</groupId>
|
||||
<artifactId>log4j</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-schema-registry-client</artifactId>
|
||||
<version>${spring-cloud-schema-registry.version}</version>
|
||||
<scope>test</scope>
|
||||
<artifactId>spring-cloud-stream-dependencies</artifactId>
|
||||
<version>1.1.0.BUILD-SNAPSHOT</version>
|
||||
<type>pom</type>
|
||||
<scope>import</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</dependencyManagement>
|
||||
|
||||
<build>
|
||||
<pluginManagement>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.codehaus.mojo</groupId>
|
||||
<artifactId>flatten-maven-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-antrun-plugin</artifactId>
|
||||
<version>1.7</version>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<configuration>
|
||||
<quiet>true</quiet>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<configuration>
|
||||
<redirectTestOutputToFile>true</redirectTestOutputToFile>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</pluginManagement>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-checkstyle-plugin</artifactId>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
<modules>
|
||||
<module>spring-cloud-stream-binder-kafka-common</module>
|
||||
<module>spring-cloud-stream-binder-kafka</module>
|
||||
<module>spring-cloud-starter-stream-kafka</module>
|
||||
<module>spring-cloud-stream-binder-kafka-0.8</module>
|
||||
<module>spring-cloud-starter-stream-kafka-0.8</module>
|
||||
<module>spring-cloud-stream-binder-kafka-test-support</module>
|
||||
</modules>
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>spring</id>
|
||||
@@ -220,12 +95,4 @@
|
||||
</pluginRepositories>
|
||||
</profile>
|
||||
</profiles>
|
||||
<reporting>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-checkstyle-plugin</artifactId>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</reporting>
|
||||
</project>
|
||||
|
||||
26
spring-cloud-starter-stream-kafka-0.8/pom.xml
Normal file
26
spring-cloud-starter-stream-kafka-0.8/pom.xml
Normal file
@@ -0,0 +1,26 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>1.1.0.BUILD-SNAPSHOT</version>
|
||||
</parent>
|
||||
<artifactId>spring-cloud-starter-stream-kafka-0.8</artifactId>
|
||||
<description>Spring Cloud Starter Stream Kafka for 0.8</description>
|
||||
<url>https://projects.spring.io/spring-cloud</url>
|
||||
<organization>
|
||||
<name>Pivotal Software, Inc.</name>
|
||||
<url>https://www.spring.io</url>
|
||||
</organization>
|
||||
<properties>
|
||||
<main.basedir>${basedir}/../..</main.basedir>
|
||||
</properties>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-0.8</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</project>
|
||||
@@ -0,0 +1 @@
|
||||
provides: spring-cloud-starter-stream-kafka-0.8
|
||||
@@ -4,7 +4,7 @@
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>3.0.0.RC2</version>
|
||||
<version>1.1.0.BUILD-SNAPSHOT</version>
|
||||
</parent>
|
||||
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
|
||||
<description>Spring Cloud Starter Stream Kafka</description>
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
provides: spring-cloud-starter-stream-kafka
|
||||
136
spring-cloud-stream-binder-kafka-0.8/pom.xml
Normal file
136
spring-cloud-stream-binder-kafka-0.8/pom.xml
Normal file
@@ -0,0 +1,136 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<artifactId>spring-cloud-stream-binder-kafka-0.8</artifactId>
|
||||
<packaging>jar</packaging>
|
||||
<name>spring-cloud-stream-binder-kafka-0.8</name>
|
||||
<description>Kafka binder implementation</description>
|
||||
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>1.1.0.BUILD-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<properties>
|
||||
<kafka.version>0.8.2.2</kafka.version>
|
||||
<curator.version>2.6.0</curator.version>
|
||||
<spring-integration-kafka.version>1.3.1.BUILD-SNAPSHOT</spring-integration-kafka.version>
|
||||
<rxjava-math.version>1.0.0</rxjava-math.version>
|
||||
</properties>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-common</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-autoconfigure</artifactId>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-test-support</artifactId>
|
||||
<scope>test</scope>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.integration</groupId>
|
||||
<artifactId>spring-integration-kafka</artifactId>
|
||||
<version>${spring-integration-kafka.version}</version>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.apache.avro</groupId>
|
||||
<artifactId>avro-compiler</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.10</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.reactivex</groupId>
|
||||
<artifactId>rxjava</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.reactivex</groupId>
|
||||
<artifactId>rxjava-math</artifactId>
|
||||
<version>${rxjava-math.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.curator</groupId>
|
||||
<artifactId>curator-recipes</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.10</artifactId>
|
||||
<classifier>test</classifier>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.curator</groupId>
|
||||
<artifactId>curator-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<dependencyManagement>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.10</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.slf4j</groupId>
|
||||
<artifactId>slf4j-log4j12</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>log4j</groupId>
|
||||
<artifactId>log4j</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.10</artifactId>
|
||||
<classifier>test</classifier>
|
||||
<version>${kafka.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.curator</groupId>
|
||||
<artifactId>curator-framework</artifactId>
|
||||
<version>${curator.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.curator</groupId>
|
||||
<artifactId>curator-recipes</artifactId>
|
||||
<version>${curator.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.curator</groupId>
|
||||
<artifactId>curator-test</artifactId>
|
||||
<version>${curator.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</dependencyManagement>
|
||||
</project>
|
||||
@@ -0,0 +1,95 @@
|
||||
/*
|
||||
* Copyright 2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import kafka.cluster.Broker;
|
||||
import kafka.utils.ZKStringSerializer$;
|
||||
import kafka.utils.ZkUtils$;
|
||||
import org.I0Itec.zkclient.ZkClient;
|
||||
import scala.collection.JavaConversions;
|
||||
import scala.collection.Seq;
|
||||
|
||||
import org.springframework.boot.actuate.health.Health;
|
||||
import org.springframework.boot.actuate.health.HealthIndicator;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.integration.kafka.core.BrokerAddress;
|
||||
import org.springframework.integration.kafka.core.Partition;
|
||||
|
||||
/**
|
||||
* Health indicator for Kafka.
|
||||
*
|
||||
* @author Ilayaperumal Gopinathan
|
||||
*/
|
||||
public class KafkaBinderHealthIndicator implements HealthIndicator {
|
||||
|
||||
private final KafkaMessageChannelBinder binder;
|
||||
|
||||
private final KafkaBinderConfigurationProperties configurationProperties;
|
||||
|
||||
public KafkaBinderHealthIndicator(KafkaMessageChannelBinder binder,
|
||||
KafkaBinderConfigurationProperties configurationProperties) {
|
||||
this.binder = binder;
|
||||
this.configurationProperties = configurationProperties;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Health health() {
|
||||
ZkClient zkClient = null;
|
||||
try {
|
||||
zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
|
||||
configurationProperties.getZkSessionTimeout(),
|
||||
configurationProperties.getZkConnectionTimeout(), ZKStringSerializer$.MODULE$);
|
||||
Set<String> brokersInClusterSet = new HashSet<>();
|
||||
Seq<Broker> allBrokersInCluster = ZkUtils$.MODULE$.getAllBrokersInCluster(zkClient);
|
||||
Collection<Broker> brokersInCluster = JavaConversions.asJavaCollection(allBrokersInCluster);
|
||||
for (Broker broker : brokersInCluster) {
|
||||
brokersInClusterSet.add(broker.connectionString());
|
||||
}
|
||||
Set<String> downMessages = new HashSet<>();
|
||||
for (Map.Entry<String, Collection<Partition>> entry : binder.getTopicsInUse().entrySet()) {
|
||||
for (Partition partition : entry.getValue()) {
|
||||
BrokerAddress address = binder.getConnectionFactory().getLeader(partition);
|
||||
if (!brokersInClusterSet.contains(address.toString())) {
|
||||
downMessages.add(address.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
if (downMessages.isEmpty()) {
|
||||
return Health.up().build();
|
||||
}
|
||||
return Health.down().withDetail("Following brokers are down: ", downMessages.toString()).build();
|
||||
}
|
||||
catch (Exception e) {
|
||||
return Health.down(e).build();
|
||||
}
|
||||
finally {
|
||||
if (zkClient != null) {
|
||||
try {
|
||||
zkClient.close();
|
||||
}
|
||||
catch (Exception e) {
|
||||
// ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,713 @@
|
||||
/*
|
||||
* Copyright 2014-2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
|
||||
import org.I0Itec.zkclient.ZkClient;
|
||||
import org.apache.kafka.clients.producer.Callback;
|
||||
import org.apache.kafka.clients.producer.Producer;
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||
import org.apache.kafka.clients.producer.RecordMetadata;
|
||||
import org.apache.kafka.common.serialization.ByteArraySerializer;
|
||||
import org.apache.kafka.common.utils.Utils;
|
||||
|
||||
import org.springframework.beans.factory.DisposableBean;
|
||||
import org.springframework.cloud.stream.binder.AbstractMessageChannelBinder;
|
||||
import org.springframework.cloud.stream.binder.Binder;
|
||||
import org.springframework.cloud.stream.binder.BinderException;
|
||||
import org.springframework.cloud.stream.binder.BinderHeaders;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
|
||||
import org.springframework.context.Lifecycle;
|
||||
import org.springframework.integration.endpoint.AbstractEndpoint;
|
||||
import org.springframework.integration.kafka.core.ConnectionFactory;
|
||||
import org.springframework.integration.kafka.core.DefaultConnectionFactory;
|
||||
import org.springframework.integration.kafka.core.KafkaMessage;
|
||||
import org.springframework.integration.kafka.core.Partition;
|
||||
import org.springframework.integration.kafka.core.ZookeeperConfiguration;
|
||||
import org.springframework.integration.kafka.inbound.KafkaMessageDrivenChannelAdapter;
|
||||
import org.springframework.integration.kafka.listener.AcknowledgingMessageListener;
|
||||
import org.springframework.integration.kafka.listener.Acknowledgment;
|
||||
import org.springframework.integration.kafka.listener.ErrorHandler;
|
||||
import org.springframework.integration.kafka.listener.KafkaMessageListenerContainer;
|
||||
import org.springframework.integration.kafka.listener.KafkaNativeOffsetManager;
|
||||
import org.springframework.integration.kafka.listener.MessageListener;
|
||||
import org.springframework.integration.kafka.listener.OffsetManager;
|
||||
import org.springframework.integration.kafka.support.KafkaHeaders;
|
||||
import org.springframework.integration.kafka.support.KafkaProducerContext;
|
||||
import org.springframework.integration.kafka.support.ProducerConfiguration;
|
||||
import org.springframework.integration.kafka.support.ProducerFactoryBean;
|
||||
import org.springframework.integration.kafka.support.ProducerListener;
|
||||
import org.springframework.integration.kafka.support.ProducerMetadata;
|
||||
import org.springframework.integration.kafka.support.ZookeeperConnect;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageChannel;
|
||||
import org.springframework.messaging.MessageHandler;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.messaging.MessagingException;
|
||||
import org.springframework.retry.RetryCallback;
|
||||
import org.springframework.retry.RetryContext;
|
||||
import org.springframework.retry.RetryOperations;
|
||||
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
|
||||
import org.springframework.retry.policy.SimpleRetryPolicy;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.scheduling.concurrent.CustomizableThreadFactory;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
import kafka.admin.AdminUtils;
|
||||
import kafka.api.OffsetRequest;
|
||||
import kafka.api.TopicMetadata;
|
||||
import kafka.common.ErrorMapping;
|
||||
import kafka.serializer.DefaultDecoder;
|
||||
import kafka.utils.ZKStringSerializer$;
|
||||
import kafka.utils.ZkUtils;
|
||||
import scala.collection.Seq;
|
||||
|
||||
/**
|
||||
* A {@link Binder} that uses Kafka as the underlying middleware.
|
||||
* @author Eric Bottard
|
||||
* @author Marius Bogoevici
|
||||
* @author Ilayaperumal Gopinathan
|
||||
* @author David Turanski
|
||||
* @author Gary Russell
|
||||
* @author Mark Fisher
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KafkaMessageChannelBinder extends
|
||||
AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>,
|
||||
ExtendedProducerProperties<KafkaProducerProperties>>
|
||||
implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties>,
|
||||
DisposableBean {
|
||||
|
||||
private static final ByteArraySerializer BYTE_ARRAY_SERIALIZER = new ByteArraySerializer();
|
||||
|
||||
private static final ThreadFactory DAEMON_THREAD_FACTORY;
|
||||
|
||||
static {
|
||||
CustomizableThreadFactory threadFactory = new CustomizableThreadFactory("kafka-binder-");
|
||||
threadFactory.setDaemon(true);
|
||||
DAEMON_THREAD_FACTORY = threadFactory;
|
||||
}
|
||||
|
||||
private final KafkaBinderConfigurationProperties configurationProperties;
|
||||
|
||||
private RetryOperations metadataRetryOperations;
|
||||
|
||||
private final Map<String, Collection<Partition>> topicsInUse = new HashMap<>();
|
||||
|
||||
// -------- Default values for properties -------
|
||||
|
||||
private ConnectionFactory connectionFactory;
|
||||
|
||||
private ProducerListener producerListener;
|
||||
|
||||
private volatile Producer<byte[], byte[]> dlqProducer;
|
||||
|
||||
private KafkaExtendedBindingProperties extendedBindingProperties = new KafkaExtendedBindingProperties();
|
||||
|
||||
public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties) {
|
||||
super(false, headersToMap(configurationProperties));
|
||||
this.configurationProperties = configurationProperties;
|
||||
}
|
||||
|
||||
private static String[] headersToMap(KafkaBinderConfigurationProperties configurationProperties) {
|
||||
String[] headersToMap;
|
||||
if (ObjectUtils.isEmpty(configurationProperties.getHeaders())) {
|
||||
headersToMap = BinderHeaders.STANDARD_HEADERS;
|
||||
}
|
||||
else {
|
||||
String[] combinedHeadersToMap = Arrays.copyOfRange(BinderHeaders.STANDARD_HEADERS, 0,
|
||||
BinderHeaders.STANDARD_HEADERS.length + configurationProperties.getHeaders().length);
|
||||
System.arraycopy(configurationProperties.getHeaders(), 0, combinedHeadersToMap,
|
||||
BinderHeaders.STANDARD_HEADERS.length,
|
||||
configurationProperties.getHeaders().length);
|
||||
headersToMap = combinedHeadersToMap;
|
||||
}
|
||||
return headersToMap;
|
||||
}
|
||||
|
||||
ConnectionFactory getConnectionFactory() {
|
||||
return this.connectionFactory;
|
||||
}
|
||||
|
||||
public void setProducerListener(ProducerListener producerListener) {
|
||||
this.producerListener = producerListener;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retry configuration for operations such as validating topic creation
|
||||
* @param metadataRetryOperations the retry configuration
|
||||
*/
|
||||
public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
|
||||
this.metadataRetryOperations = metadataRetryOperations;
|
||||
}
|
||||
|
||||
public void setExtendedBindingProperties(KafkaExtendedBindingProperties extendedBindingProperties) {
|
||||
this.extendedBindingProperties = extendedBindingProperties;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onInit() throws Exception {
|
||||
ZookeeperConfiguration configuration = new ZookeeperConfiguration(
|
||||
new ZookeeperConnect(this.configurationProperties.getZkConnectionString()));
|
||||
configuration.setBufferSize(this.configurationProperties.getSocketBufferSize());
|
||||
configuration.setMaxWait(this.configurationProperties.getMaxWait());
|
||||
DefaultConnectionFactory defaultConnectionFactory = new DefaultConnectionFactory(configuration);
|
||||
defaultConnectionFactory.afterPropertiesSet();
|
||||
this.connectionFactory = defaultConnectionFactory;
|
||||
if (this.metadataRetryOperations == null) {
|
||||
RetryTemplate retryTemplate = new RetryTemplate();
|
||||
|
||||
SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
|
||||
simpleRetryPolicy.setMaxAttempts(10);
|
||||
retryTemplate.setRetryPolicy(simpleRetryPolicy);
|
||||
|
||||
ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
|
||||
backOffPolicy.setInitialInterval(100);
|
||||
backOffPolicy.setMultiplier(2);
|
||||
backOffPolicy.setMaxInterval(1000);
|
||||
retryTemplate.setBackOffPolicy(backOffPolicy);
|
||||
this.metadataRetryOperations = retryTemplate;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void destroy() throws Exception {
|
||||
if (this.dlqProducer != null) {
|
||||
this.dlqProducer.close();
|
||||
this.dlqProducer = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
|
||||
*/
|
||||
static void validateTopicName(String topicName) {
|
||||
try {
|
||||
byte[] utf8 = topicName.getBytes("UTF-8");
|
||||
for (byte b : utf8) {
|
||||
if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z') || (b >= '0') && (b <= '9') || (b == '.')
|
||||
|| (b == '-') || (b == '_'))) {
|
||||
throw new IllegalArgumentException(
|
||||
"Topic name can only have ASCII alphanumerics, '.', '_' and '-'");
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (UnsupportedEncodingException e) {
|
||||
throw new AssertionError(e); // Can't happen
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
|
||||
return this.extendedBindingProperties.getExtendedConsumerProperties(channelName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaProducerProperties getExtendedProducerProperties(String channelName) {
|
||||
return this.extendedBindingProperties.getExtendedProducerProperties(channelName);
|
||||
}
|
||||
|
||||
Map<String, Collection<Partition>> getTopicsInUse() {
|
||||
return this.topicsInUse;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Object createConsumerDestinationIfNecessary(String name, String group,
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
|
||||
validateTopicName(name);
|
||||
if (properties.getInstanceCount() == 0) {
|
||||
throw new IllegalArgumentException("Instance count cannot be zero");
|
||||
}
|
||||
Collection<Partition> allPartitions = ensureTopicCreated(name,
|
||||
properties.getInstanceCount() * properties.getConcurrency());
|
||||
|
||||
Collection<Partition> listenedPartitions;
|
||||
|
||||
if (properties.getInstanceCount() == 1) {
|
||||
listenedPartitions = allPartitions;
|
||||
}
|
||||
else {
|
||||
listenedPartitions = new ArrayList<>();
|
||||
for (Partition partition : allPartitions) {
|
||||
// divide partitions across modules
|
||||
if ((partition.getId() % properties.getInstanceCount()) == properties.getInstanceIndex()) {
|
||||
listenedPartitions.add(partition);
|
||||
}
|
||||
}
|
||||
}
|
||||
this.topicsInUse.put(name, listenedPartitions);
|
||||
return listenedPartitions;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
protected AbstractEndpoint createConsumerEndpoint(String name, String group, Object queue,
|
||||
MessageChannel inputChannel, ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
|
||||
|
||||
Collection<Partition> listenedPartitions = (Collection<Partition>) queue;
|
||||
Assert.isTrue(!CollectionUtils.isEmpty(listenedPartitions), "A list of partitions must be provided");
|
||||
|
||||
int concurrency = Math.min(properties.getConcurrency(), listenedPartitions.size());
|
||||
|
||||
final ExecutorService dispatcherTaskExecutor =
|
||||
Executors.newFixedThreadPool(concurrency, DAEMON_THREAD_FACTORY);
|
||||
final KafkaMessageListenerContainer messageListenerContainer = new KafkaMessageListenerContainer(
|
||||
this.connectionFactory, listenedPartitions.toArray(new Partition[listenedPartitions.size()])) {
|
||||
|
||||
@Override
|
||||
public void stop(Runnable callback) {
|
||||
super.stop(callback);
|
||||
if (getOffsetManager() instanceof DisposableBean) {
|
||||
try {
|
||||
((DisposableBean) getOffsetManager()).destroy();
|
||||
}
|
||||
catch (Exception e) {
|
||||
KafkaMessageChannelBinder.this.logger.error("Error while closing the offset manager", e);
|
||||
}
|
||||
}
|
||||
dispatcherTaskExecutor.shutdown();
|
||||
}
|
||||
};
|
||||
|
||||
if (this.logger.isDebugEnabled()) {
|
||||
this.logger.debug(
|
||||
"Listened partitions: " + StringUtils.collectionToCommaDelimitedString(listenedPartitions));
|
||||
}
|
||||
|
||||
boolean anonymous = !StringUtils.hasText(group);
|
||||
Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
|
||||
"DLQ support is not available for anonymous subscriptions");
|
||||
String consumerGroup = anonymous ? "anonymous." + UUID.randomUUID().toString() : group;
|
||||
|
||||
long referencePoint = properties.getExtension().getStartOffset() != null
|
||||
? properties.getExtension().getStartOffset().getReferencePoint()
|
||||
: (anonymous ? OffsetRequest.LatestTime() : OffsetRequest.EarliestTime());
|
||||
OffsetManager offsetManager = createOffsetManager(consumerGroup, referencePoint);
|
||||
if (properties.getExtension().isResetOffsets()) {
|
||||
offsetManager.resetOffsets(listenedPartitions);
|
||||
}
|
||||
messageListenerContainer.setOffsetManager(offsetManager);
|
||||
messageListenerContainer.setQueueSize(this.configurationProperties.getQueueSize());
|
||||
messageListenerContainer.setMaxFetch(this.configurationProperties.getFetchSize());
|
||||
boolean autoCommitOnError = properties.getExtension().getAutoCommitOnError() != null
|
||||
? properties.getExtension().getAutoCommitOnError()
|
||||
: properties.getExtension().isAutoCommitOffset() && properties.getExtension().isEnableDlq();
|
||||
messageListenerContainer.setAutoCommitOnError(autoCommitOnError);
|
||||
messageListenerContainer.setRecoveryInterval(properties.getExtension().getRecoveryInterval());
|
||||
messageListenerContainer.setConcurrency(concurrency);
|
||||
messageListenerContainer.setDispatcherTaskExecutor(dispatcherTaskExecutor);
|
||||
final KafkaMessageDrivenChannelAdapter kafkaMessageDrivenChannelAdapter = new KafkaMessageDrivenChannelAdapter(
|
||||
messageListenerContainer);
|
||||
kafkaMessageDrivenChannelAdapter.setBeanFactory(this.getBeanFactory());
|
||||
kafkaMessageDrivenChannelAdapter.setKeyDecoder(new DefaultDecoder(null));
|
||||
kafkaMessageDrivenChannelAdapter.setPayloadDecoder(new DefaultDecoder(null));
|
||||
kafkaMessageDrivenChannelAdapter.setOutputChannel(inputChannel);
|
||||
kafkaMessageDrivenChannelAdapter.setAutoCommitOffset(properties.getExtension().isAutoCommitOffset());
|
||||
kafkaMessageDrivenChannelAdapter.afterPropertiesSet();
|
||||
|
||||
// we need to wrap the adapter listener into a retrying listener so that the retry
|
||||
// logic is applied before the ErrorHandler is executed
|
||||
final RetryTemplate retryTemplate = buildRetryTemplateIfRetryEnabled(properties);
|
||||
if (retryTemplate != null) {
|
||||
if (properties.getExtension().isAutoCommitOffset()) {
|
||||
final MessageListener originalMessageListener = (MessageListener) messageListenerContainer
|
||||
.getMessageListener();
|
||||
messageListenerContainer.setMessageListener(new MessageListener() {
|
||||
|
||||
@Override
|
||||
public void onMessage(final KafkaMessage message) {
|
||||
try {
|
||||
retryTemplate.execute(new RetryCallback<Object, Throwable>() {
|
||||
|
||||
@Override
|
||||
public Object doWithRetry(RetryContext context) {
|
||||
originalMessageListener.onMessage(message);
|
||||
return null;
|
||||
}
|
||||
});
|
||||
}
|
||||
catch (Throwable throwable) {
|
||||
if (throwable instanceof RuntimeException) {
|
||||
throw (RuntimeException) throwable;
|
||||
}
|
||||
else {
|
||||
throw new RuntimeException(throwable);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
messageListenerContainer.setMessageListener(new AcknowledgingMessageListener() {
|
||||
|
||||
final AcknowledgingMessageListener originalMessageListener =
|
||||
(AcknowledgingMessageListener) messageListenerContainer
|
||||
.getMessageListener();
|
||||
|
||||
@Override
|
||||
public void onMessage(final KafkaMessage message, final Acknowledgment acknowledgment) {
|
||||
retryTemplate.execute(new RetryCallback<Object, RuntimeException>() {
|
||||
|
||||
@Override
|
||||
public Object doWithRetry(RetryContext context) {
|
||||
originalMessageListener.onMessage(message, acknowledgment);
|
||||
return null;
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (properties.getExtension().isEnableDlq()) {
|
||||
final String dlqTopic = "error." + name + "." + consumerGroup;
|
||||
initDlqProducer();
|
||||
messageListenerContainer.setErrorHandler(new ErrorHandler() {
|
||||
|
||||
@Override
|
||||
public void handle(Exception thrownException, final KafkaMessage message) {
|
||||
final byte[] key = message.getMessage().key() != null ? Utils.toArray(message.getMessage().key())
|
||||
: null;
|
||||
final byte[] payload = message.getMessage().payload() != null
|
||||
? Utils.toArray(message.getMessage().payload()) : null;
|
||||
KafkaMessageChannelBinder.this.dlqProducer.send(new ProducerRecord<>(dlqTopic, key, payload),
|
||||
new Callback() {
|
||||
|
||||
@Override
|
||||
public void onCompletion(RecordMetadata metadata, Exception exception) {
|
||||
StringBuffer messageLog = new StringBuffer();
|
||||
messageLog.append(" a message with key='"
|
||||
+ toDisplayString(ObjectUtils.nullSafeToString(key), 50) + "'");
|
||||
messageLog.append(" and payload='"
|
||||
+ toDisplayString(ObjectUtils.nullSafeToString(payload), 50) + "'");
|
||||
messageLog.append(" received from " + message.getMetadata().getPartition());
|
||||
if (exception != null) {
|
||||
KafkaMessageChannelBinder.this.logger.error(
|
||||
"Error sending to DLQ" + messageLog.toString(), exception);
|
||||
}
|
||||
else {
|
||||
if (KafkaMessageChannelBinder.this.logger.isDebugEnabled()) {
|
||||
KafkaMessageChannelBinder.this.logger.debug(
|
||||
"Sent to DLQ " + messageLog.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
kafkaMessageDrivenChannelAdapter.start();
|
||||
return kafkaMessageDrivenChannelAdapter;
|
||||
}
|
||||
|
||||
/**
 * Builds the outbound {@link MessageHandler} for the given topic name: a byte[] -> byte[]
 * Kafka producer configured from the binder-level and producer-level properties
 * (sync mode, compression, batch size, acks, linger time).
 *
 * @param name the target topic name
 * @param producerProperties producer properties, including Kafka-specific extensions
 * @return a lifecycle-aware handler that sends message payloads to the topic
 * @throws Exception if the underlying producer factory fails to create the producer
 */
@Override
protected MessageHandler createProducerMessageHandler(final String name,
		ExtendedProducerProperties<KafkaProducerProperties> producerProperties) throws Exception {
	ProducerMetadata<byte[], byte[]> producerMetadata = new ProducerMetadata<>(name, byte[].class, byte[].class,
			BYTE_ARRAY_SERIALIZER, BYTE_ARRAY_SERIALIZER);
	producerMetadata.setSync(producerProperties.getExtension().isSync());

	KafkaProducerProperties.CompressionType compressionType = producerProperties.getExtension().getCompressionType();

	// translate the binder-properties compression enum into the Spring Integration Kafka one
	producerMetadata.setCompressionType(fromKafkaProducerPropertiesCompressionType(compressionType));
	producerMetadata.setBatchBytes(producerProperties.getExtension().getBufferSize());
	Properties additional = new Properties();
	additional.put(ProducerConfig.ACKS_CONFIG, String.valueOf(this.configurationProperties.getRequiredAcks()));
	additional.put(ProducerConfig.LINGER_MS_CONFIG,
			String.valueOf(producerProperties.getExtension().getBatchTimeout()));
	ProducerFactoryBean<byte[], byte[]> producerFB = new ProducerFactoryBean<>(producerMetadata,
			this.configurationProperties.getKafkaConnectionString(), additional);
	final ProducerConfiguration<byte[], byte[]> producerConfiguration = new ProducerConfiguration<>(
			producerMetadata, producerFB.getObject());
	producerConfiguration.setProducerListener(this.producerListener);
	// NOTE(review): kafkaProducerContext is populated here but not referenced afterwards
	// in this method — confirm whether registration has side effects or can be removed.
	KafkaProducerContext kafkaProducerContext = new KafkaProducerContext();
	kafkaProducerContext.setProducerConfigurations(
			Collections.<String, ProducerConfiguration<?, ?>>singletonMap(name, producerConfiguration));
	return new ProducerConfigurationMessageHandler(producerConfiguration, name);
}
|
||||
|
||||
ProducerMetadata.CompressionType fromKafkaProducerPropertiesCompressionType(
|
||||
KafkaProducerProperties.CompressionType compressionType) {
|
||||
switch (compressionType) {
|
||||
case snappy : return ProducerMetadata.CompressionType.snappy;
|
||||
case gzip : return ProducerMetadata.CompressionType.gzip;
|
||||
default: return ProducerMetadata.CompressionType.none;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Ensures the outbound topic exists (creating or resizing it as configured) and
 * records its partitions for later use.
 *
 * @param name the topic name
 * @param properties producer properties carrying the requested partition count
 */
@Override
protected void createProducerDestinationIfNecessary(String name,
		ExtendedProducerProperties<KafkaProducerProperties> properties) {
	if (this.logger.isInfoEnabled()) {
		this.logger.info("Using kafka topic for outbound: " + name);
	}
	validateTopicName(name);
	// the broker may already have more partitions than requested; that is tolerated
	Collection<Partition> partitions = ensureTopicCreated(name, properties.getPartitionCount());
	if (properties.getPartitionCount() < partitions.size()) {
		if (this.logger.isInfoEnabled()) {
			this.logger.info("The `partitionCount` of the producer for topic " + name + " is "
					+ properties.getPartitionCount() + ", smaller than the actual partition count of "
					+ partitions.size() + " of the topic. The larger number will be used instead.");
		}
	}
	// record the partitions in use for this topic
	this.topicsInUse.put(name, partitions);
}
|
||||
|
||||
/**
|
||||
* Creates a Kafka topic if needed, or try to increase its partition count to the
|
||||
* desired number.
|
||||
*/
|
||||
private Collection<Partition> ensureTopicCreated(final String topicName, final int partitionCount) {
|
||||
|
||||
final ZkClient zkClient = new ZkClient(this.configurationProperties.getZkConnectionString(),
|
||||
this.configurationProperties.getZkSessionTimeout(),
|
||||
this.configurationProperties.getZkConnectionTimeout(),
|
||||
ZKStringSerializer$.MODULE$);
|
||||
try {
|
||||
final Properties topicConfig = new Properties();
|
||||
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topicName, zkClient);
|
||||
if (topicMetadata.errorCode() == ErrorMapping.NoError()) {
|
||||
// only consider minPartitionCount for resizing if autoAddPartitions is
|
||||
// true
|
||||
int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
|
||||
? Math.max(this.configurationProperties.getMinPartitionCount(),
|
||||
partitionCount) : partitionCount;
|
||||
if (topicMetadata.partitionsMetadata().size() < effectivePartitionCount) {
|
||||
if (this.configurationProperties.isAutoAddPartitions()) {
|
||||
AdminUtils.addPartitions(zkClient, topicName, effectivePartitionCount, null, false,
|
||||
new Properties());
|
||||
}
|
||||
else {
|
||||
int topicSize = topicMetadata.partitionsMetadata().size();
|
||||
throw new BinderException("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ topicSize + (topicSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "Consider either increasing the partition count of the topic or enabling " +
|
||||
"`autoAddPartitions`");
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) {
|
||||
if (this.configurationProperties.isAutoCreateTopics()) {
|
||||
Seq<Object> brokerList = ZkUtils.getSortedBrokerList(zkClient);
|
||||
// always consider minPartitionCount for topic creation
|
||||
int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
|
||||
partitionCount);
|
||||
final scala.collection.Map<Object, Seq<Object>> replicaAssignment = AdminUtils
|
||||
.assignReplicasToBrokers(brokerList, effectivePartitionCount,
|
||||
this.configurationProperties.getReplicationFactor(), -1, -1);
|
||||
this.metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {
|
||||
|
||||
@Override
|
||||
public Object doWithRetry(RetryContext context) throws RuntimeException {
|
||||
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topicName,
|
||||
replicaAssignment, topicConfig, true);
|
||||
return null;
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
throw new BinderException("Topic " + topicName + " does not exist");
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw new BinderException("Error fetching Kafka topic metadata: ",
|
||||
ErrorMapping.exceptionFor(topicMetadata.errorCode()));
|
||||
}
|
||||
try {
|
||||
Collection<Partition> partitions = this.metadataRetryOperations
|
||||
.execute(new RetryCallback<Collection<Partition>, Exception>() {
|
||||
|
||||
@Override
|
||||
public Collection<Partition> doWithRetry(RetryContext context) throws Exception {
|
||||
KafkaMessageChannelBinder.this.connectionFactory.refreshMetadata(
|
||||
Collections.singleton(topicName));
|
||||
Collection<Partition> partitions =
|
||||
KafkaMessageChannelBinder.this.connectionFactory.getPartitions(topicName);
|
||||
// do a sanity check on the partition set
|
||||
if (partitions.size() < partitionCount) {
|
||||
throw new IllegalStateException("The number of expected partitions was: "
|
||||
+ partitionCount + ", but " + partitions.size()
|
||||
+ (partitions.size() > 1 ? " have " : " has ") + "been found instead");
|
||||
}
|
||||
KafkaMessageChannelBinder.this.connectionFactory.getLeaders(partitions);
|
||||
return partitions;
|
||||
}
|
||||
});
|
||||
return partitions;
|
||||
}
|
||||
catch (Exception e) {
|
||||
this.logger.error("Cannot initialize Binder", e);
|
||||
throw new BinderException("Cannot initialize binder:", e);
|
||||
}
|
||||
|
||||
}
|
||||
finally {
|
||||
zkClient.close();
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized void initDlqProducer() {
|
||||
try {
|
||||
if (this.dlqProducer == null) {
|
||||
synchronized (this) {
|
||||
if (this.dlqProducer == null) {
|
||||
// we can use the producer defaults as we do not need to tune
|
||||
// performance
|
||||
ProducerMetadata<byte[], byte[]> producerMetadata = new ProducerMetadata<>("dlqKafkaProducer",
|
||||
byte[].class, byte[].class, BYTE_ARRAY_SERIALIZER, BYTE_ARRAY_SERIALIZER);
|
||||
producerMetadata.setSync(false);
|
||||
producerMetadata.setCompressionType(ProducerMetadata.CompressionType.none);
|
||||
producerMetadata.setBatchBytes(16384);
|
||||
Properties additionalProps = new Properties();
|
||||
additionalProps.put(ProducerConfig.ACKS_CONFIG,
|
||||
String.valueOf(this.configurationProperties.getRequiredAcks()));
|
||||
additionalProps.put(ProducerConfig.LINGER_MS_CONFIG, String.valueOf(0));
|
||||
ProducerFactoryBean<byte[], byte[]> producerFactoryBean = new ProducerFactoryBean<>(
|
||||
producerMetadata, this.configurationProperties.getKafkaConnectionString(),
|
||||
additionalProps);
|
||||
this.dlqProducer = producerFactoryBean.getObject();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new RuntimeException("Cannot initialize DLQ producer:", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Builds the offset-manager chain for a consumer group: a {@link KafkaNativeOffsetManager}
 * (offsets stored via Kafka/ZooKeeper) wrapped in a {@link WindowingOffsetManager} that
 * batches offset writes according to the binder's window/count settings.
 *
 * @param group the consumer group id
 * @param referencePoint the Kafka reference timestamp used when no committed offset exists
 *        (earliest/latest start position)
 * @return the fully initialized offset manager
 */
private OffsetManager createOffsetManager(String group, long referencePoint) {
	try {

		KafkaNativeOffsetManager kafkaOffsetManager = new KafkaNativeOffsetManager(this.connectionFactory,
				new ZookeeperConnect(this.configurationProperties.getZkConnectionString()),
				Collections.<Partition, Long>emptyMap());
		kafkaOffsetManager.setConsumerId(group);
		kafkaOffsetManager.setReferenceTimestamp(referencePoint);
		kafkaOffsetManager.afterPropertiesSet();

		// the windowing wrapper reduces write load by aggregating offset updates
		WindowingOffsetManager windowingOffsetManager = new WindowingOffsetManager(kafkaOffsetManager);
		windowingOffsetManager.setTimespan(this.configurationProperties.getOffsetUpdateTimeWindow());
		windowingOffsetManager.setCount(this.configurationProperties.getOffsetUpdateCount());
		windowingOffsetManager.setShutdownTimeout(this.configurationProperties.getOffsetUpdateShutdownTimeout());

		windowingOffsetManager.afterPropertiesSet();
		return windowingOffsetManager;
	}
	catch (Exception e) {
		throw new RuntimeException(e);
	}
}
|
||||
|
||||
private String toDisplayString(String original, int maxCharacters) {
|
||||
if (original.length() <= maxCharacters) {
|
||||
return original;
|
||||
}
|
||||
return original.substring(0, maxCharacters) + "...";
|
||||
}
|
||||
|
||||
@Override
|
||||
public void doManualAck(LinkedList<MessageHeaders> messageHeadersList) {
|
||||
Iterator<MessageHeaders> iterator = messageHeadersList.iterator();
|
||||
while (iterator.hasNext()) {
|
||||
MessageHeaders headers = iterator.next();
|
||||
Acknowledgment acknowledgment = (Acknowledgment) headers.get(KafkaHeaders.ACKNOWLEDGMENT);
|
||||
Assert.notNull(acknowledgment,
|
||||
"Acknowledgement shouldn't be null when acknowledging kafka message " + "manually.");
|
||||
acknowledgment.acknowledge();
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Logical start positions for a consumer group, mapped onto the Kafka special
 * offset-request times (earliest/latest).
 */
public enum StartOffset {

	earliest(OffsetRequest.EarliestTime()), latest(OffsetRequest.LatestTime());

	// the Kafka "time" constant used when fetching the initial offset
	private final long referencePoint;

	StartOffset(long referencePoint) {
		this.referencePoint = referencePoint;
	}

	public long getReferencePoint() {
		return this.referencePoint;
	}
}
|
||||
|
||||
private final static class ProducerConfigurationMessageHandler implements MessageHandler, Lifecycle {
|
||||
|
||||
private ProducerConfiguration<byte[], byte[]> delegate;
|
||||
|
||||
private String targetTopic;
|
||||
|
||||
private boolean running = true;
|
||||
|
||||
private ProducerConfigurationMessageHandler(
|
||||
ProducerConfiguration<byte[], byte[]> delegate, String targetTopic) {
|
||||
Assert.notNull(delegate, "Delegate cannot be null");
|
||||
Assert.hasText(targetTopic, "Target topic cannot be null");
|
||||
this.delegate = delegate;
|
||||
this.targetTopic = targetTopic;
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void start() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stop() {
|
||||
this.delegate.stop();
|
||||
this.running = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isRunning() {
|
||||
return this.running;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleMessage(Message<?> message) throws MessagingException {
|
||||
this.delegate.send(this.targetTopic,
|
||||
message.getHeaders().get(BinderHeaders.PARTITION_HEADER, Integer.class), null,
|
||||
(byte[]) message.getPayload());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,262 @@
|
||||
/*
|
||||
* Copyright 2015 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import rx.Observable;
|
||||
import rx.Subscription;
|
||||
import rx.functions.Action0;
|
||||
import rx.functions.Action1;
|
||||
import rx.functions.Func1;
|
||||
import rx.functions.Func2;
|
||||
import rx.observables.GroupedObservable;
|
||||
import rx.observables.MathObservable;
|
||||
import rx.subjects.PublishSubject;
|
||||
import rx.subjects.SerializedSubject;
|
||||
import rx.subjects.Subject;
|
||||
|
||||
import org.springframework.beans.factory.DisposableBean;
|
||||
import org.springframework.beans.factory.InitializingBean;
|
||||
import org.springframework.integration.kafka.core.Partition;
|
||||
import org.springframework.integration.kafka.listener.OffsetManager;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
/**
|
||||
* An {@link OffsetManager} that aggregates writes over a time or count window, using an underlying delegate to
|
||||
* do the actual operations. Its purpose is to reduce the performance impact of writing operations
|
||||
* wherever this is desirable.
|
||||
*
|
||||
* Either a time window or a number of writes can be specified, but not both.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
*/
|
||||
public class WindowingOffsetManager implements OffsetManager, InitializingBean, DisposableBean {
|
||||
|
||||
private final CreatePartitionAndOffsetFunction createPartitionAndOffsetFunction = new CreatePartitionAndOffsetFunction();
|
||||
|
||||
private final GetOffsetFunction getOffsetFunction = new GetOffsetFunction();
|
||||
|
||||
private final ComputeMaximumOffsetByPartitionFunction findHighestOffsetInPartitionGroup = new ComputeMaximumOffsetByPartitionFunction();
|
||||
|
||||
private final GetPartitionFunction getPartition = new GetPartitionFunction();
|
||||
|
||||
private final FindHighestOffsetsByPartitionFunction findHighestOffsetsByPartition = new FindHighestOffsetsByPartitionFunction();
|
||||
|
||||
private final DelegateUpdateOffsetAction delegateUpdateOffsetAction = new DelegateUpdateOffsetAction();
|
||||
|
||||
private final NotifyObservableClosedAction notifyObservableClosed = new NotifyObservableClosedAction();
|
||||
|
||||
private final OffsetManager delegate;
|
||||
|
||||
private long timespan = 10 * 1000;
|
||||
|
||||
private int count;
|
||||
|
||||
private Subject<PartitionAndOffset, PartitionAndOffset> offsets;
|
||||
|
||||
private Subscription subscription;
|
||||
|
||||
private int shutdownTimeout = 2000;
|
||||
|
||||
private CountDownLatch shutdownLatch;
|
||||
|
||||
public WindowingOffsetManager(OffsetManager offsetManager) {
|
||||
this.delegate = offsetManager;
|
||||
}
|
||||
|
||||
/**
|
||||
* The timespan for aggregating write operations, before invoking the underlying {@link OffsetManager}.
|
||||
*
|
||||
* @param timespan duration in milliseconds
|
||||
*/
|
||||
public void setTimespan(long timespan) {
|
||||
Assert.isTrue(timespan >= 0, "Timespan must be a positive value");
|
||||
this.timespan = timespan;
|
||||
}
|
||||
|
||||
/**
|
||||
* How many writes should be aggregated, before invoking the underlying {@link OffsetManager}. Setting this value
|
||||
* to 1 effectively disables windowing.
|
||||
*
|
||||
* @param count number of writes
|
||||
*/
|
||||
public void setCount(int count) {
|
||||
Assert.isTrue(count >= 0, "Count must be a positive value");
|
||||
this.count = count;
|
||||
}
|
||||
|
||||
/**
|
||||
* The timeout that {@link #close()} and {@link #destroy()} operations will wait for receving a confirmation that the
|
||||
* underlying writes have been processed.
|
||||
*
|
||||
* @param shutdownTimeout duration in milliseconds
|
||||
*/
|
||||
public void setShutdownTimeout(int shutdownTimeout) {
|
||||
this.shutdownTimeout = shutdownTimeout;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterPropertiesSet() throws Exception {
|
||||
Assert.isTrue(timespan > 0 ^ count > 0, "Only one of the timespan or count must be set");
|
||||
// create the stream if windowing is set, and count is higher than 1
|
||||
if (timespan > 0 || count > 1) {
|
||||
offsets = new SerializedSubject<>(PublishSubject.<PartitionAndOffset>create());
|
||||
// window by either count or time
|
||||
Observable<Observable<PartitionAndOffset>> window =
|
||||
timespan > 0 ? offsets.window(timespan, TimeUnit.MILLISECONDS) : offsets.window(count);
|
||||
Observable<PartitionAndOffset> maximumOffsetsByWindow = window
|
||||
.flatMap(findHighestOffsetsByPartition)
|
||||
.doOnCompleted(notifyObservableClosed);
|
||||
subscription = maximumOffsetsByWindow.subscribe(delegateUpdateOffsetAction);
|
||||
}
|
||||
else {
|
||||
offsets = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void destroy() throws Exception {
|
||||
this.flush();
|
||||
this.close();
|
||||
if (delegate instanceof DisposableBean) {
|
||||
((DisposableBean) delegate).destroy();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void updateOffset(Partition partition, long offset) {
|
||||
if (offsets != null) {
|
||||
offsets.onNext(new PartitionAndOffset(partition, offset));
|
||||
}
|
||||
else {
|
||||
delegate.updateOffset(partition, offset);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getOffset(Partition partition) {
|
||||
return delegate.getOffset(partition);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void deleteOffset(Partition partition) {
|
||||
delegate.deleteOffset(partition);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void resetOffsets(Collection<Partition> partition) {
|
||||
delegate.resetOffsets(partition);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
if (offsets != null) {
|
||||
shutdownLatch = new CountDownLatch(1);
|
||||
offsets.onCompleted();
|
||||
try {
|
||||
shutdownLatch.await(shutdownTimeout, TimeUnit.MILLISECONDS);
|
||||
}
|
||||
catch (InterruptedException e) {
|
||||
// ignore
|
||||
}
|
||||
subscription.unsubscribe();
|
||||
}
|
||||
delegate.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void flush() throws IOException {
|
||||
delegate.flush();
|
||||
}
|
||||
|
||||
private final class PartitionAndOffset {
|
||||
|
||||
private final Partition partition;
|
||||
|
||||
private final Long offset;
|
||||
|
||||
private PartitionAndOffset(Partition partition, Long offset) {
|
||||
this.partition = partition;
|
||||
this.offset = offset;
|
||||
}
|
||||
|
||||
public Partition getPartition() {
|
||||
return partition;
|
||||
}
|
||||
|
||||
public Long getOffset() {
|
||||
return offset;
|
||||
}
|
||||
}
|
||||
|
||||
private class DelegateUpdateOffsetAction implements Action1<PartitionAndOffset> {
|
||||
@Override
|
||||
public void call(PartitionAndOffset partitionAndOffset) {
|
||||
delegate.updateOffset(partitionAndOffset.getPartition(), partitionAndOffset.getOffset());
|
||||
}
|
||||
}
|
||||
|
||||
private class NotifyObservableClosedAction implements Action0 {
|
||||
@Override
|
||||
public void call() {
|
||||
if (shutdownLatch != null) {
|
||||
shutdownLatch.countDown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private class CreatePartitionAndOffsetFunction implements Func2<Partition, Long, PartitionAndOffset> {
|
||||
@Override
|
||||
public PartitionAndOffset call(Partition partition, Long offset) {
|
||||
return new PartitionAndOffset(partition, offset);
|
||||
}
|
||||
}
|
||||
|
||||
private class GetOffsetFunction implements Func1<PartitionAndOffset, Long> {
|
||||
@Override
|
||||
public Long call(PartitionAndOffset partitionAndOffset) {
|
||||
return partitionAndOffset.getOffset();
|
||||
}
|
||||
}
|
||||
|
||||
private class ComputeMaximumOffsetByPartitionFunction implements Func1<GroupedObservable<Partition, PartitionAndOffset>, Observable<PartitionAndOffset>> {
|
||||
@Override
|
||||
public Observable<PartitionAndOffset> call(GroupedObservable<Partition, PartitionAndOffset> group) {
|
||||
return Observable.zip(Observable.just(group.getKey()),
|
||||
MathObservable.max(group.map(getOffsetFunction)),
|
||||
createPartitionAndOffsetFunction);
|
||||
}
|
||||
}
|
||||
|
||||
private class GetPartitionFunction implements Func1<PartitionAndOffset, Partition> {
|
||||
@Override
|
||||
public Partition call(PartitionAndOffset partitionAndOffset) {
|
||||
return partitionAndOffset.getPartition();
|
||||
}
|
||||
}
|
||||
|
||||
private class FindHighestOffsetsByPartitionFunction implements Func1<Observable<PartitionAndOffset>, Observable<PartitionAndOffset>> {
|
||||
@Override
|
||||
public Observable<PartitionAndOffset> call(Observable<PartitionAndOffset> windowBuffer) {
|
||||
return windowBuffer.groupBy(getPartition).flatMap(findHighestOffsetInPartitionGroup);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,79 @@
|
||||
/*
|
||||
* Copyright 2015-2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.config;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.autoconfigure.PropertyPlaceholderAutoConfiguration;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.Binder;
|
||||
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
|
||||
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.config.codec.kryo.KryoCodecAutoConfiguration;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.integration.codec.Codec;
|
||||
import org.springframework.integration.kafka.support.LoggingProducerListener;
|
||||
import org.springframework.integration.kafka.support.ProducerListener;
|
||||
|
||||
/**
 * Auto-configuration for the Kafka binder: wires the {@link KafkaMessageChannelBinder}
 * with its codec, producer listener, extended binding properties, and a health indicator.
 * Only active when no other {@link Binder} bean is present.
 *
 * @author David Turanski
 * @author Marius Bogoevici
 * @author Soby Chacko
 * @author Mark Fisher
 * @author Ilayaperumal Gopinathan
 */
@Configuration
@ConditionalOnMissingBean(Binder.class)
@Import({ KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class })
@EnableConfigurationProperties({ KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class })
public class KafkaBinderConfiguration {

	@Autowired
	private Codec codec;

	@Autowired
	private KafkaBinderConfigurationProperties configurationProperties;

	@Autowired
	private KafkaExtendedBindingProperties kafkaExtendedBindingProperties;

	@Autowired
	private ProducerListener producerListener;

	// the binder itself, fully wired with codec, listener and extended properties
	@Bean
	KafkaMessageChannelBinder kafkaMessageChannelBinder() {
		KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(configurationProperties);
		kafkaMessageChannelBinder.setCodec(codec);
		kafkaMessageChannelBinder.setProducerListener(producerListener);
		kafkaMessageChannelBinder.setExtendedBindingProperties(kafkaExtendedBindingProperties);
		return kafkaMessageChannelBinder;
	}

	// default listener that logs producer send results; applications may override
	@Bean
	@ConditionalOnMissingBean(ProducerListener.class)
	ProducerListener producerListener() {
		return new LoggingProducerListener();
	}

	// exposes binder connectivity via the Boot health endpoint
	@Bean
	KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
		return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, configurationProperties);
	}
}
|
||||
@@ -0,0 +1,20 @@
|
||||
/*
|
||||
* Copyright 2015 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* This package contains an implementation of the {@link org.springframework.cloud.stream.binder.Binder} for Kafka.
|
||||
*/
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
@@ -0,0 +1,2 @@
|
||||
kafka:\
|
||||
org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration
|
||||
@@ -0,0 +1,820 @@
|
||||
/*
|
||||
* Copyright 2014-2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.Properties;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.beans.DirectFieldAccessor;
|
||||
import org.springframework.cloud.stream.binder.BinderException;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.PartitionCapableBinderTests;
|
||||
import org.springframework.cloud.stream.binder.Spy;
|
||||
import org.springframework.cloud.stream.binder.TestUtils;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.test.junit.kafka.KafkaTestSupport;
|
||||
import org.springframework.context.support.GenericApplicationContext;
|
||||
import org.springframework.integration.channel.DirectChannel;
|
||||
import org.springframework.integration.channel.QueueChannel;
|
||||
import org.springframework.integration.kafka.core.Partition;
|
||||
import org.springframework.integration.kafka.core.TopicNotFoundException;
|
||||
import org.springframework.integration.kafka.support.KafkaHeaders;
|
||||
import org.springframework.integration.kafka.support.ProducerConfiguration;
|
||||
import org.springframework.integration.kafka.support.ProducerMetadata;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageChannel;
|
||||
import org.springframework.messaging.MessageHandler;
|
||||
import org.springframework.messaging.MessagingException;
|
||||
import org.springframework.messaging.support.GenericMessage;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
import org.springframework.retry.backoff.FixedBackOffPolicy;
|
||||
import org.springframework.retry.policy.SimpleRetryPolicy;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
|
||||
import kafka.admin.AdminUtils;
|
||||
import kafka.api.TopicMetadata;
|
||||
|
||||
/**
|
||||
* Integration tests for the {@link KafkaMessageChannelBinder}.
|
||||
* @author Eric Bottard
|
||||
* @author Marius Bogoevici
|
||||
* @author Mark Fisher
|
||||
* @author Ilayaperumal Gopinathan
|
||||
*/
|
||||
public class KafkaBinderTests extends
|
||||
PartitionCapableBinderTests<KafkaTestBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
|
||||
|
||||
private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();
|
||||
|
||||
@ClassRule
|
||||
public static KafkaTestSupport kafkaTestSupport = new KafkaTestSupport();
|
||||
|
||||
private KafkaTestBinder binder;
|
||||
|
||||
@Override
protected void binderBindUnbindLatency() throws InterruptedException {
	// bind/unbind operations propagate asynchronously; give the broker time to settle
	Thread.sleep(500);
}
|
||||
|
||||
// Lazily creates a single test binder instance, shared across the tests in this class.
@Override
protected KafkaTestBinder getBinder() {
	if (binder == null) {
		KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
		binder = new KafkaTestBinder(binderConfiguration);
	}
	return binder;
}
|
||||
|
||||
// Builds binder configuration pointing at the embedded test broker and ZooKeeper.
private KafkaBinderConfigurationProperties createConfigurationProperties() {
	KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
	binderConfiguration.setBrokers(kafkaTestSupport.getBrokerAddress());
	binderConfiguration.setZkNodes(kafkaTestSupport.getZkConnectString());
	return binderConfiguration;
}
|
||||
|
||||
// Fresh consumer properties with Kafka-specific extension defaults.
@Override
protected ExtendedConsumerProperties<KafkaConsumerProperties> createConsumerProperties() {
	return new ExtendedConsumerProperties<>(new KafkaConsumerProperties());
}
|
||||
|
||||
// Fresh producer properties with Kafka-specific extension defaults.
@Override
protected ExtendedProducerProperties<KafkaProducerProperties> createProducerProperties() {
	return new ExtendedProducerProperties<>(new KafkaProducerProperties());
}
|
||||
|
||||
@Before
public void init() {
	// allow slow CI environments to scale the test timeouts via an env variable
	String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
	if (multiplier != null) {
		timeoutMultiplier = Double.parseDouble(multiplier);
	}
}
|
||||
|
||||
@Override
|
||||
protected boolean usesExplicitRouting() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getClassUnderTestName() {
|
||||
return CLASS_UNDER_TEST_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Spy spyOn(final String name) {
|
||||
throw new UnsupportedOperationException("'spyOn' is not used by Kafka tests");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDlqAndRetry() {
|
||||
KafkaTestBinder binder = getBinder();
|
||||
DirectChannel moduleOutputChannel = new DirectChannel();
|
||||
DirectChannel moduleInputChannel = new DirectChannel();
|
||||
QueueChannel dlqChannel = new QueueChannel();
|
||||
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
|
||||
moduleInputChannel.subscribe(handler);
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.setPartitionCount(10);
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
consumerProperties.setMaxAttempts(3);
|
||||
consumerProperties.setBackOffInitialInterval(100);
|
||||
consumerProperties.setBackOffMaxInterval(150);
|
||||
consumerProperties.getExtension().setEnableDlq(true);
|
||||
long uniqueBindingId = System.currentTimeMillis();
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
|
||||
moduleOutputChannel, producerProperties);
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
|
||||
"testGroup", moduleInputChannel, consumerProperties);
|
||||
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> dlqConsumerProperties = createConsumerProperties();
|
||||
dlqConsumerProperties.setMaxAttempts(1);
|
||||
|
||||
Binding<MessageChannel> dlqConsumerBinding = binder.bindConsumer(
|
||||
"error.retryTest." + uniqueBindingId + ".0.testGroup", null, dlqChannel, dlqConsumerProperties);
|
||||
|
||||
String testMessagePayload = "test." + UUID.randomUUID().toString();
|
||||
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
|
||||
moduleOutputChannel.send(testMessage);
|
||||
|
||||
Message<?> receivedMessage = receive(dlqChannel, 3);
|
||||
assertThat(receivedMessage).isNotNull();
|
||||
assertThat(receivedMessage.getPayload()).isEqualTo(testMessagePayload);
|
||||
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
|
||||
dlqConsumerBinding.unbind();
|
||||
consumerBinding.unbind();
|
||||
producerBinding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDefaultAutoCommitOnErrorWithoutDlq() throws Exception {
|
||||
KafkaTestBinder binder = getBinder();
|
||||
DirectChannel moduleOutputChannel = new DirectChannel();
|
||||
DirectChannel moduleInputChannel = new DirectChannel();
|
||||
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
|
||||
moduleInputChannel.subscribe(handler);
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.setPartitionCount(10);
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
consumerProperties.setMaxAttempts(1);
|
||||
consumerProperties.setBackOffInitialInterval(100);
|
||||
consumerProperties.setBackOffMaxInterval(150);
|
||||
long uniqueBindingId = System.currentTimeMillis();
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
|
||||
moduleOutputChannel, producerProperties);
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
|
||||
"testGroup", moduleInputChannel, consumerProperties);
|
||||
|
||||
String testMessagePayload = "test." + UUID.randomUUID().toString();
|
||||
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
|
||||
moduleOutputChannel.send(testMessage);
|
||||
|
||||
assertThat(handler.getLatch().await((int) (timeoutMultiplier * 1000), TimeUnit.MILLISECONDS));
|
||||
// first attempt fails
|
||||
assertThat(handler.getReceivedMessages().entrySet()).hasSize(1);
|
||||
Message<?> receivedMessage = handler.getReceivedMessages().entrySet().iterator().next().getValue();
|
||||
assertThat(receivedMessage).isNotNull();
|
||||
assertThat(receivedMessage.getPayload()).isEqualTo(testMessagePayload);
|
||||
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
|
||||
consumerBinding.unbind();
|
||||
|
||||
// on the second attempt the message is redelivered
|
||||
QueueChannel successfulInputChannel = new QueueChannel();
|
||||
consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0", "testGroup",
|
||||
successfulInputChannel, consumerProperties);
|
||||
String testMessage2Payload = "test." + UUID.randomUUID().toString();
|
||||
Message<String> testMessage2 = MessageBuilder.withPayload(testMessage2Payload).build();
|
||||
moduleOutputChannel.send(testMessage2);
|
||||
|
||||
Message<?> firstReceived = receive(successfulInputChannel);
|
||||
assertThat(firstReceived.getPayload()).isEqualTo(testMessagePayload);
|
||||
Message<?> secondReceived = receive(successfulInputChannel);
|
||||
assertThat(secondReceived.getPayload()).isEqualTo(testMessage2Payload);
|
||||
consumerBinding.unbind();
|
||||
producerBinding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDefaultAutoCommitOnErrorWithDlq() throws Exception {
|
||||
KafkaTestBinder binder = getBinder();
|
||||
DirectChannel moduleOutputChannel = new DirectChannel();
|
||||
DirectChannel moduleInputChannel = new DirectChannel();
|
||||
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
|
||||
moduleInputChannel.subscribe(handler);
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.setPartitionCount(10);
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
consumerProperties.setMaxAttempts(3);
|
||||
consumerProperties.setBackOffInitialInterval(100);
|
||||
consumerProperties.setBackOffMaxInterval(150);
|
||||
consumerProperties.getExtension().setEnableDlq(true);
|
||||
long uniqueBindingId = System.currentTimeMillis();
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
|
||||
moduleOutputChannel, producerProperties);
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
|
||||
"testGroup", moduleInputChannel, consumerProperties);
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> dlqConsumerProperties = createConsumerProperties();
|
||||
dlqConsumerProperties.setMaxAttempts(1);
|
||||
QueueChannel dlqChannel = new QueueChannel();
|
||||
Binding<MessageChannel> dlqConsumerBinding = binder.bindConsumer(
|
||||
"error.retryTest." + uniqueBindingId + ".0.testGroup", null, dlqChannel, dlqConsumerProperties);
|
||||
|
||||
String testMessagePayload = "test." + UUID.randomUUID().toString();
|
||||
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
|
||||
moduleOutputChannel.send(testMessage);
|
||||
|
||||
Message<?> dlqMessage = receive(dlqChannel, 3);
|
||||
assertThat(dlqMessage).isNotNull();
|
||||
assertThat(dlqMessage.getPayload()).isEqualTo(testMessagePayload);
|
||||
|
||||
// first attempt fails
|
||||
assertThat(handler.getReceivedMessages().entrySet()).hasSize(1);
|
||||
Message<?> handledMessage = handler.getReceivedMessages().entrySet().iterator().next().getValue();
|
||||
assertThat(handledMessage).isNotNull();
|
||||
assertThat(handledMessage.getPayload()).isEqualTo(testMessagePayload);
|
||||
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
|
||||
|
||||
dlqConsumerBinding.unbind();
|
||||
consumerBinding.unbind();
|
||||
|
||||
// on the second attempt the message is not redelivered because the DLQ is set
|
||||
QueueChannel successfulInputChannel = new QueueChannel();
|
||||
consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0", "testGroup",
|
||||
successfulInputChannel, consumerProperties);
|
||||
String testMessage2Payload = "test." + UUID.randomUUID().toString();
|
||||
Message<String> testMessage2 = MessageBuilder.withPayload(testMessage2Payload).build();
|
||||
moduleOutputChannel.send(testMessage2);
|
||||
|
||||
Message<?> receivedMessage = receive(successfulInputChannel);
|
||||
assertThat(receivedMessage.getPayload()).isEqualTo(testMessage2Payload);
|
||||
|
||||
consumerBinding.unbind();
|
||||
producerBinding.unbind();
|
||||
}
|
||||
|
||||
@Test(expected = IllegalArgumentException.class)
|
||||
public void testValidateKafkaTopicName() {
|
||||
KafkaMessageChannelBinder.validateTopicName("foo:bar");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCompression() throws Exception {
|
||||
final ProducerMetadata.CompressionType[] codecs = new ProducerMetadata.CompressionType[] {
|
||||
ProducerMetadata.CompressionType.none, ProducerMetadata.CompressionType.gzip,
|
||||
ProducerMetadata.CompressionType.snappy };
|
||||
byte[] testPayload = new byte[2048];
|
||||
Arrays.fill(testPayload, (byte) 65);
|
||||
KafkaTestBinder binder = getBinder();
|
||||
for (ProducerMetadata.CompressionType codec : codecs) {
|
||||
DirectChannel moduleOutputChannel = new DirectChannel();
|
||||
QueueChannel moduleInputChannel = new QueueChannel();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.getExtension().setCompressionType(fromProducerMetadataCompressionType(codec));
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel,
|
||||
producerProperties);
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
|
||||
createConsumerProperties());
|
||||
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
|
||||
.build();
|
||||
// Let the consumer actually bind to the producer before sending a msg
|
||||
binderBindUnbindLatency();
|
||||
moduleOutputChannel.send(message);
|
||||
Message<?> inbound = receive(moduleInputChannel);
|
||||
assertThat(inbound).isNotNull();
|
||||
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
|
||||
producerBinding.unbind();
|
||||
consumerBinding.unbind();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCustomPartitionCountOverridesDefaultIfLarger() throws Exception {
|
||||
|
||||
byte[] testPayload = new byte[2048];
|
||||
Arrays.fill(testPayload, (byte) 65);
|
||||
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
|
||||
binderConfiguration.setMinPartitionCount(10);
|
||||
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
|
||||
|
||||
DirectChannel moduleOutputChannel = new DirectChannel();
|
||||
QueueChannel moduleInputChannel = new QueueChannel();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.setPartitionCount(10);
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
long uniqueBindingId = System.currentTimeMillis();
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
|
||||
moduleOutputChannel, producerProperties);
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
|
||||
moduleInputChannel, consumerProperties);
|
||||
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
|
||||
.build();
|
||||
// Let the consumer actually bind to the producer before sending a msg
|
||||
binderBindUnbindLatency();
|
||||
moduleOutputChannel.send(message);
|
||||
Message<?> inbound = receive(moduleInputChannel);
|
||||
assertThat(inbound).isNotNull();
|
||||
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
|
||||
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
|
||||
.getPartitions("foo" + uniqueBindingId + ".0");
|
||||
assertThat(partitions).hasSize(10);
|
||||
producerBinding.unbind();
|
||||
consumerBinding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCustomPartitionCountDoesNotOverridePartitioningIfSmaller() throws Exception {
|
||||
|
||||
byte[] testPayload = new byte[2048];
|
||||
Arrays.fill(testPayload, (byte) 65);
|
||||
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
|
||||
binderConfiguration.setMinPartitionCount(6);
|
||||
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
|
||||
DirectChannel moduleOutputChannel = new DirectChannel();
|
||||
QueueChannel moduleInputChannel = new QueueChannel();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.setPartitionCount(5);
|
||||
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
long uniqueBindingId = System.currentTimeMillis();
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
|
||||
moduleOutputChannel, producerProperties);
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
|
||||
moduleInputChannel, consumerProperties);
|
||||
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
|
||||
.build();
|
||||
// Let the consumer actually bind to the producer before sending a msg
|
||||
binderBindUnbindLatency();
|
||||
moduleOutputChannel.send(message);
|
||||
Message<?> inbound = receive(moduleInputChannel);
|
||||
assertThat(inbound).isNotNull();
|
||||
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
|
||||
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
|
||||
.getPartitions("foo" + uniqueBindingId + ".0");
|
||||
assertThat(partitions).hasSize(6);
|
||||
producerBinding.unbind();
|
||||
consumerBinding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCustomPartitionCountOverridesPartitioningIfLarger() throws Exception {
|
||||
|
||||
byte[] testPayload = new byte[2048];
|
||||
Arrays.fill(testPayload, (byte) 65);
|
||||
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
|
||||
binderConfiguration.setMinPartitionCount(4);
|
||||
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
|
||||
|
||||
DirectChannel moduleOutputChannel = new DirectChannel();
|
||||
QueueChannel moduleInputChannel = new QueueChannel();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.setPartitionCount(5);
|
||||
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
long uniqueBindingId = System.currentTimeMillis();
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
|
||||
moduleOutputChannel, producerProperties);
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
|
||||
moduleInputChannel, consumerProperties);
|
||||
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
|
||||
.build();
|
||||
// Let the consumer actually bind to the producer before sending a msg
|
||||
binderBindUnbindLatency();
|
||||
moduleOutputChannel.send(message);
|
||||
Message<?> inbound = receive(moduleInputChannel);
|
||||
assertThat(inbound).isNotNull();
|
||||
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
|
||||
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
|
||||
.getPartitions("foo" + uniqueBindingId + ".0");
|
||||
assertThat(partitions).hasSize(5);
|
||||
producerBinding.unbind();
|
||||
consumerBinding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testDefaultConsumerStartsAtEarliest() throws Exception {
|
||||
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(createConfigurationProperties());
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
context.refresh();
|
||||
binder.setApplicationContext(context);
|
||||
binder.afterPropertiesSet();
|
||||
DirectChannel output = new DirectChannel();
|
||||
QueueChannel input1 = new QueueChannel();
|
||||
|
||||
String testTopicName = UUID.randomUUID().toString();
|
||||
binder.bindProducer(testTopicName, output, createProducerProperties());
|
||||
String testPayload1 = "foo-" + UUID.randomUUID().toString();
|
||||
output.send(new GenericMessage<>(testPayload1.getBytes()));
|
||||
binder.bindConsumer(testTopicName, "startOffsets", input1, createConsumerProperties());
|
||||
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage1).isNotNull();
|
||||
assertThat(new String(receivedMessage1.getPayload())).isEqualTo(testPayload1);
|
||||
String testPayload2 = "foo-" + UUID.randomUUID().toString();
|
||||
output.send(new GenericMessage<>(testPayload2.getBytes()));
|
||||
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage2).isNotNull();
|
||||
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
|
||||
}
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testEarliest() throws Exception {
|
||||
KafkaTestBinder binder = getBinder();
|
||||
DirectChannel output = new DirectChannel();
|
||||
QueueChannel input1 = new QueueChannel();
|
||||
|
||||
String testTopicName = UUID.randomUUID().toString();
|
||||
binder.bindProducer(testTopicName, output, createProducerProperties());
|
||||
String testPayload1 = "foo-" + UUID.randomUUID().toString();
|
||||
output.send(new GenericMessage<>(testPayload1.getBytes()));
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
|
||||
properties.getExtension().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
|
||||
binder.bindConsumer(testTopicName, "startOffsets", input1, properties);
|
||||
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage1).isNotNull();
|
||||
String testPayload2 = "foo-" + UUID.randomUUID().toString();
|
||||
output.send(new GenericMessage<>(testPayload2.getBytes()));
|
||||
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage2).isNotNull();
|
||||
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
|
||||
}
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testReset() throws Exception {
|
||||
KafkaTestBinder binder = getBinder();
|
||||
DirectChannel output = new DirectChannel();
|
||||
QueueChannel input1 = new QueueChannel();
|
||||
|
||||
String testTopicName = UUID.randomUUID().toString();
|
||||
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output,
|
||||
createProducerProperties());
|
||||
String testPayload1 = "foo-" + UUID.randomUUID().toString();
|
||||
output.send(new GenericMessage<>(testPayload1.getBytes()));
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
|
||||
properties.getExtension().setResetOffsets(true);
|
||||
properties.getExtension().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1,
|
||||
properties);
|
||||
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage1).isNotNull();
|
||||
String testPayload2 = "foo-" + UUID.randomUUID().toString();
|
||||
output.send(new GenericMessage<>(testPayload2.getBytes()));
|
||||
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage2).isNotNull();
|
||||
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
|
||||
consumerBinding.unbind();
|
||||
|
||||
String testPayload3 = "foo-" + UUID.randomUUID().toString();
|
||||
output.send(new GenericMessage<>(testPayload3.getBytes()));
|
||||
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> properties2 = createConsumerProperties();
|
||||
properties2.getExtension().setResetOffsets(true);
|
||||
properties2.getExtension().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
|
||||
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1, properties2);
|
||||
Message<byte[]> receivedMessage4 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage4).isNotNull();
|
||||
assertThat(new String(receivedMessage4.getPayload())).isEqualTo(testPayload1);
|
||||
Message<byte[]> receivedMessage5 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage5).isNotNull();
|
||||
assertThat(new String(receivedMessage5.getPayload())).isEqualTo(testPayload2);
|
||||
Message<byte[]> receivedMessage6 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage6).isNotNull();
|
||||
assertThat(new String(receivedMessage6.getPayload())).isEqualTo(testPayload3);
|
||||
consumerBinding.unbind();
|
||||
producerBinding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testResume() throws Exception {
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
context.refresh();
|
||||
binder.setApplicationContext(context);
|
||||
binder.afterPropertiesSet();
|
||||
DirectChannel output = new DirectChannel();
|
||||
QueueChannel input1 = new QueueChannel();
|
||||
|
||||
String testTopicName = UUID.randomUUID().toString();
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output,
|
||||
createProducerProperties());
|
||||
String testPayload1 = "foo-" + UUID.randomUUID().toString();
|
||||
output.send(new GenericMessage<>(testPayload1.getBytes()));
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> firstConsumerProperties = createConsumerProperties();
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1,
|
||||
firstConsumerProperties);
|
||||
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage1).isNotNull();
|
||||
String testPayload2 = "foo-" + UUID.randomUUID().toString();
|
||||
output.send(new GenericMessage<>(testPayload2.getBytes()));
|
||||
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage2).isNotNull();
|
||||
assertThat(new String(receivedMessage2.getPayload())).isNotNull();
|
||||
consumerBinding.unbind();
|
||||
|
||||
String testPayload3 = "foo-" + UUID.randomUUID().toString();
|
||||
output.send(new GenericMessage<>(testPayload3.getBytes()));
|
||||
|
||||
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1, createConsumerProperties());
|
||||
Message<byte[]> receivedMessage3 = (Message<byte[]>) receive(input1);
|
||||
assertThat(receivedMessage3).isNotNull();
|
||||
assertThat(new String(receivedMessage3.getPayload())).isEqualTo(testPayload3);
|
||||
consumerBinding.unbind();
|
||||
producerBinding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSyncProducerMetadata() throws Exception {
|
||||
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(createConfigurationProperties());
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
context.refresh();
|
||||
binder.setApplicationContext(context);
|
||||
binder.afterPropertiesSet();
|
||||
DirectChannel output = new DirectChannel();
|
||||
String testTopicName = UUID.randomUUID().toString();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
|
||||
properties.getExtension().setSync(true);
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output, properties);
|
||||
DirectFieldAccessor accessor = new DirectFieldAccessor(extractEndpoint(producerBinding));
|
||||
MessageHandler handler = (MessageHandler) accessor.getPropertyValue("handler");
|
||||
DirectFieldAccessor accessor1 = new DirectFieldAccessor(handler);
|
||||
ProducerConfiguration producerConfiguration = (ProducerConfiguration) accessor1
|
||||
.getPropertyValue("producerConfiguration");
|
||||
assertThat(producerConfiguration.getProducerMetadata().isSync())
|
||||
.withFailMessage("Kafka Sync Producer should have been enabled.");
|
||||
producerBinding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAutoCreateTopicsDisabledFailsIfTopicMissing() throws Exception {
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
configurationProperties.setAutoCreateTopics(false);
|
||||
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
context.refresh();
|
||||
binder.setApplicationContext(context);
|
||||
binder.afterPropertiesSet();
|
||||
RetryTemplate metatadataRetrievalRetryOperations = new RetryTemplate();
|
||||
metatadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
|
||||
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
|
||||
backOffPolicy.setBackOffPeriod(1000);
|
||||
metatadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
|
||||
binder.setMetadataRetryOperations(metatadataRetrievalRetryOperations);
|
||||
DirectChannel output = new DirectChannel();
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
String testTopicName = "nonexisting" + System.currentTimeMillis();
|
||||
try {
|
||||
binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
|
||||
fail();
|
||||
}
|
||||
catch (Exception e) {
|
||||
assertThat(e).isInstanceOf(BinderException.class);
|
||||
assertThat(e).hasMessageContaining("Topic " + testTopicName + " does not exist");
|
||||
}
|
||||
|
||||
try {
|
||||
binder.getConnectionFactory().getPartitions(testTopicName);
|
||||
fail();
|
||||
}
|
||||
catch (Exception e) {
|
||||
assertThat(e).isInstanceOf(TopicNotFoundException.class);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAutoConfigureTopicsDisabledSucceedsIfTopicExisting() throws Exception {
|
||||
String testTopicName = "existing" + System.currentTimeMillis();
|
||||
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 5, 1, new Properties());
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
configurationProperties.setAutoCreateTopics(false);
|
||||
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
context.refresh();
|
||||
binder.setApplicationContext(context);
|
||||
binder.afterPropertiesSet();
|
||||
DirectChannel output = new DirectChannel();
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
Binding<MessageChannel> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
|
||||
binding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAutoAddPartitionsDisabledFailsIfTopicUnderpartitioned() throws Exception {
|
||||
String testTopicName = "existing" + System.currentTimeMillis();
|
||||
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 1, 1, new Properties());
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
configurationProperties.setAutoAddPartitions(false);
|
||||
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
context.refresh();
|
||||
binder.setApplicationContext(context);
|
||||
binder.afterPropertiesSet();
|
||||
DirectChannel output = new DirectChannel();
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
// this consumer must consume from partition 2
|
||||
consumerProperties.setInstanceCount(3);
|
||||
consumerProperties.setInstanceIndex(2);
|
||||
try {
|
||||
binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
|
||||
}
|
||||
catch (Exception e) {
|
||||
assertThat(e).isInstanceOf(BinderException.class);
|
||||
assertThat(e)
|
||||
.hasMessageContaining("The number of expected partitions was: 3, but 1 has been found instead");
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAutoAddPartitionsDisabledSucceedsIfTopicPartitionedCorrectly() throws Exception {
|
||||
|
||||
String testTopicName = "existing" + System.currentTimeMillis();
|
||||
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 6, 1, new Properties());
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
configurationProperties.setAutoAddPartitions(false);
|
||||
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
RetryTemplate metatadataRetrievalRetryOperations = new RetryTemplate();
|
||||
metatadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
|
||||
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
|
||||
backOffPolicy.setBackOffPeriod(1000);
|
||||
metatadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
|
||||
binder.setMetadataRetryOperations(metatadataRetrievalRetryOperations);
|
||||
context.refresh();
|
||||
binder.setApplicationContext(context);
|
||||
binder.afterPropertiesSet();
|
||||
DirectChannel output = new DirectChannel();
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
// this consumer must consume from partition 2
|
||||
consumerProperties.setInstanceCount(3);
|
||||
consumerProperties.setInstanceIndex(2);
|
||||
|
||||
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
|
||||
|
||||
Partition[] listenedPartitions = TestUtils.getPropertyValue(binding,
|
||||
"endpoint.val$messageListenerContainer.partitions", Partition[].class);
|
||||
|
||||
assertThat(listenedPartitions).hasSize(2);
|
||||
assertThat(listenedPartitions).contains(new Partition(testTopicName, 2), new Partition(testTopicName, 5));
|
||||
Collection<Partition> partitions = binder.getConnectionFactory().getPartitions(testTopicName);
|
||||
assertThat(partitions).hasSize(6);
|
||||
binding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAutoCreateTopicsEnabledSucceeds() throws Exception {
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
configurationProperties.setAutoCreateTopics(true);
|
||||
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
context.refresh();
|
||||
binder.setApplicationContext(context);
|
||||
binder.afterPropertiesSet();
|
||||
RetryTemplate metatadataRetrievalRetryOperations = new RetryTemplate();
|
||||
metatadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
|
||||
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
|
||||
backOffPolicy.setBackOffPeriod(1000);
|
||||
metatadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
|
||||
binder.setMetadataRetryOperations(metatadataRetrievalRetryOperations);
|
||||
DirectChannel output = new DirectChannel();
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
String testTopicName = "nonexisting" + System.currentTimeMillis();
|
||||
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
|
||||
binding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPartitionCountNotReduced() throws Exception {
|
||||
String testTopicName = "existing" + System.currentTimeMillis();
|
||||
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 6, 1, new Properties());
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
configurationProperties.setAutoAddPartitions(true);
|
||||
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
context.refresh();
|
||||
binder.setApplicationContext(context);
|
||||
binder.afterPropertiesSet();
|
||||
RetryTemplate metatadataRetrievalRetryOperations = new RetryTemplate();
|
||||
metatadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
|
||||
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
|
||||
backOffPolicy.setBackOffPeriod(1000);
|
||||
metatadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
|
||||
binder.setMetadataRetryOperations(metatadataRetrievalRetryOperations);
|
||||
DirectChannel output = new DirectChannel();
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
|
||||
binding.unbind();
|
||||
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(testTopicName,
|
||||
kafkaTestSupport.getZkClient());
|
||||
assertThat(topicMetadata.partitionsMetadata().size()).isEqualTo(6);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPartitionCountIncreasedIfAutoAddPartitionsSet() throws Exception {
|
||||
String testTopicName = "existing" + System.currentTimeMillis();
|
||||
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 1, 1, new Properties());
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
configurationProperties.setMinPartitionCount(6);
|
||||
configurationProperties.setAutoAddPartitions(true);
|
||||
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
context.refresh();
|
||||
binder.setApplicationContext(context);
|
||||
binder.afterPropertiesSet();
|
||||
RetryTemplate metatadataRetrievalRetryOperations = new RetryTemplate();
|
||||
metatadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
|
||||
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
|
||||
backOffPolicy.setBackOffPeriod(1000);
|
||||
metatadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
|
||||
binder.setMetadataRetryOperations(metatadataRetrievalRetryOperations);
|
||||
DirectChannel output = new DirectChannel();
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
|
||||
binding.unbind();
|
||||
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(testTopicName,
|
||||
kafkaTestSupport.getZkClient());
|
||||
assertThat(topicMetadata.partitionsMetadata().size()).isEqualTo(6);
|
||||
}
|
||||
|
||||
KafkaProducerProperties.CompressionType fromProducerMetadataCompressionType(
|
||||
ProducerMetadata.CompressionType compressionType) {
|
||||
switch (compressionType) {
|
||||
case snappy : return KafkaProducerProperties.CompressionType.snappy;
|
||||
case gzip : return KafkaProducerProperties.CompressionType.gzip;
|
||||
default: return KafkaProducerProperties.CompressionType.none;
|
||||
}
|
||||
}
|
||||
|
||||
	/**
	 * Test handler that records each distinct message (by Kafka offset) and then
	 * always throws, so the surrounding retry/redelivery machinery is exercised.
	 */
	private static final class FailingInvocationCountingMessageHandler implements MessageHandler {

		// Total number of handleMessage invocations, including redeliveries.
		// NOTE(review): incremented without synchronization — presumably the
		// listener container delivers from a single thread; confirm.
		private int invocationCount;

		// Unique messages keyed by their Kafka offset, in arrival order.
		private final LinkedHashMap<Long, Message<?>> receivedMessages = new LinkedHashMap<>();

		// Counts down once per distinct offset received.
		private final CountDownLatch latch;

		private FailingInvocationCountingMessageHandler(int latchSize) {
			latch = new CountDownLatch(latchSize);
		}

		// Convenience: expect a single message.
		private FailingInvocationCountingMessageHandler() {
			this(1);
		}

		@Override
		public void handleMessage(Message<?> message) throws MessagingException {
			invocationCount++;
			Long offset = message.getHeaders().get(KafkaHeaders.OFFSET, Long.class);
			// using the offset as key allows to ensure that we don't store duplicate
			// messages on retry
			if (!receivedMessages.containsKey(offset)) {
				receivedMessages.put(offset, message);
				latch.countDown();
			}
			// Always fail so the container retries/redelivers the message.
			throw new RuntimeException();
		}

		public LinkedHashMap<Long, Message<?>> getReceivedMessages() {
			return receivedMessages;
		}

		public int getInvocationCount() {
			return invocationCount;
		}

		public CountDownLatch getLatch() {
			return latch;
		}
	}
|
||||
}
|
||||
@@ -0,0 +1,91 @@
|
||||
/*
|
||||
* Copyright 2015-2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.springframework.cloud.stream.binder.AbstractTestBinder;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.test.junit.kafka.TestKafkaCluster;
|
||||
import org.springframework.context.support.GenericApplicationContext;
|
||||
import org.springframework.integration.codec.Codec;
|
||||
import org.springframework.integration.codec.kryo.KryoRegistrar;
|
||||
import org.springframework.integration.codec.kryo.PojoCodec;
|
||||
import org.springframework.integration.kafka.support.LoggingProducerListener;
|
||||
import org.springframework.integration.kafka.support.ProducerListener;
|
||||
import org.springframework.integration.tuple.TupleKryoRegistrar;
|
||||
|
||||
import com.esotericsoftware.kryo.Kryo;
|
||||
import com.esotericsoftware.kryo.Registration;
|
||||
|
||||
/**
|
||||
* Test support class for {@link KafkaMessageChannelBinder}. Creates a binder that uses a
|
||||
* test {@link TestKafkaCluster kafka cluster}.
|
||||
* @author Eric Bottard
|
||||
* @author Marius Bogoevici
|
||||
* @author David Turanski
|
||||
* @author Gary Russell
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KafkaTestBinder extends
|
||||
AbstractTestBinder<KafkaMessageChannelBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
|
||||
|
||||
public KafkaTestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
|
||||
try {
|
||||
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration);
|
||||
binder.setCodec(getCodec());
|
||||
ProducerListener producerListener = new LoggingProducerListener();
|
||||
binder.setProducerListener(producerListener);
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
context.refresh();
|
||||
binder.setApplicationContext(context);
|
||||
binder.afterPropertiesSet();
|
||||
this.setBinder(binder);
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void cleanup() {
|
||||
// do nothing - the rule will take care of that
|
||||
}
|
||||
|
||||
private static Codec getCodec() {
|
||||
return new PojoCodec(new TupleRegistrar());
|
||||
}
|
||||
|
||||
private static class TupleRegistrar implements KryoRegistrar {
|
||||
private final TupleKryoRegistrar delegate = new TupleKryoRegistrar();
|
||||
|
||||
@Override
|
||||
public void registerTypes(Kryo kryo) {
|
||||
this.delegate.registerTypes(kryo);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Registration> getRegistrations() {
|
||||
return this.delegate.getRegistrations();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Copyright 2014 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy;
|
||||
import org.springframework.cloud.stream.binder.PartitionSelectorStrategy;
|
||||
import org.springframework.messaging.Message;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
*/
|
||||
public class RawKafkaPartitionTestSupport implements PartitionKeyExtractorStrategy, PartitionSelectorStrategy {
|
||||
|
||||
@Override
|
||||
public int selectPartition(Object key, int divisor) {
|
||||
return ((byte[]) key)[0] % divisor;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object extractKey(Message<?> message) {
|
||||
return message.getPayload();
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,264 @@
|
||||
/*
|
||||
* Copyright 2015-2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.HeaderMode;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
|
||||
import org.springframework.integration.IntegrationMessageHeaderAccessor;
|
||||
import org.springframework.integration.channel.DirectChannel;
|
||||
import org.springframework.integration.channel.QueueChannel;
|
||||
import org.springframework.integration.endpoint.AbstractEndpoint;
|
||||
import org.springframework.integration.support.MessageBuilder;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageChannel;
|
||||
import org.springframework.messaging.support.GenericMessage;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
* @author David Turanski
|
||||
* @author Gary Russell
|
||||
* @author Mark Fisher
|
||||
*/
|
||||
public class RawModeKafkaBinderTests extends KafkaBinderTests {
|
||||
|
||||
@Test
|
||||
@Override
|
||||
public void testPartitionedModuleJava() throws Exception {
|
||||
KafkaTestBinder binder = getBinder();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
|
||||
properties.setHeaderMode(HeaderMode.raw);
|
||||
properties.setPartitionKeyExtractorClass(RawKafkaPartitionTestSupport.class);
|
||||
properties.setPartitionSelectorClass(RawKafkaPartitionTestSupport.class);
|
||||
properties.setPartitionCount(6);
|
||||
|
||||
DirectChannel output = new DirectChannel();
|
||||
output.setBeanName("test.output");
|
||||
Binding<MessageChannel> outputBinding = binder.bindProducer("partJ.0", output, properties);
|
||||
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
consumerProperties.setConcurrency(2);
|
||||
consumerProperties.setInstanceCount(3);
|
||||
consumerProperties.setInstanceIndex(0);
|
||||
consumerProperties.setPartitioned(true);
|
||||
consumerProperties.setHeaderMode(HeaderMode.raw);
|
||||
QueueChannel input0 = new QueueChannel();
|
||||
input0.setBeanName("test.input0J");
|
||||
Binding<MessageChannel> input0Binding = binder.bindConsumer("partJ.0", "test", input0, consumerProperties);
|
||||
consumerProperties.setInstanceIndex(1);
|
||||
QueueChannel input1 = new QueueChannel();
|
||||
input1.setBeanName("test.input1J");
|
||||
Binding<MessageChannel> input1Binding = binder.bindConsumer("partJ.0", "test", input1, consumerProperties);
|
||||
consumerProperties.setInstanceIndex(2);
|
||||
QueueChannel input2 = new QueueChannel();
|
||||
input2.setBeanName("test.input2J");
|
||||
Binding<MessageChannel> input2Binding = binder.bindConsumer("partJ.0", "test", input2, consumerProperties);
|
||||
|
||||
output.send(new GenericMessage<>(new byte[] { (byte) 0 }));
|
||||
output.send(new GenericMessage<>(new byte[] { (byte) 1 }));
|
||||
output.send(new GenericMessage<>(new byte[] { (byte) 2 }));
|
||||
|
||||
Message<?> receive0 = receive(input0);
|
||||
assertThat(receive0).isNotNull();
|
||||
Message<?> receive1 = receive(input1);
|
||||
assertThat(receive1).isNotNull();
|
||||
Message<?> receive2 = receive(input2);
|
||||
assertThat(receive2).isNotNull();
|
||||
|
||||
assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
|
||||
((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
|
||||
|
||||
input0Binding.unbind();
|
||||
input1Binding.unbind();
|
||||
input2Binding.unbind();
|
||||
outputBinding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
@Override
|
||||
public void testPartitionedModuleSpEL() throws Exception {
|
||||
KafkaTestBinder binder = getBinder();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
|
||||
properties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload[0]"));
|
||||
properties.setPartitionSelectorExpression(spelExpressionParser.parseExpression("hashCode()"));
|
||||
properties.setPartitionCount(6);
|
||||
properties.setHeaderMode(HeaderMode.raw);
|
||||
|
||||
DirectChannel output = new DirectChannel();
|
||||
output.setBeanName("test.output");
|
||||
Binding<MessageChannel> outputBinding = binder.bindProducer("part.0", output, properties);
|
||||
try {
|
||||
AbstractEndpoint endpoint = (AbstractEndpoint)extractEndpoint(outputBinding);
|
||||
assertThat(getEndpointRouting(endpoint)).contains("part.0-' + headers['partition']");
|
||||
}
|
||||
catch (UnsupportedOperationException ignored) {
|
||||
|
||||
}
|
||||
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
consumerProperties.setConcurrency(2);
|
||||
consumerProperties.setInstanceIndex(0);
|
||||
consumerProperties.setInstanceCount(3);
|
||||
consumerProperties.setPartitioned(true);
|
||||
consumerProperties.setHeaderMode(HeaderMode.raw);
|
||||
QueueChannel input0 = new QueueChannel();
|
||||
input0.setBeanName("test.input0S");
|
||||
Binding<MessageChannel> input0Binding = binder.bindConsumer("part.0", "test", input0, consumerProperties);
|
||||
consumerProperties.setInstanceIndex(1);
|
||||
QueueChannel input1 = new QueueChannel();
|
||||
input1.setBeanName("test.input1S");
|
||||
Binding<MessageChannel> input1Binding = binder.bindConsumer("part.0", "test", input1, consumerProperties);
|
||||
consumerProperties.setInstanceIndex(2);
|
||||
QueueChannel input2 = new QueueChannel();
|
||||
input2.setBeanName("test.input2S");
|
||||
Binding<MessageChannel> input2Binding = binder.bindConsumer("part.0", "test", input2, consumerProperties);
|
||||
|
||||
Message<byte[]> message2 = MessageBuilder.withPayload(new byte[] { 2 })
|
||||
.setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "foo")
|
||||
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
|
||||
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 43).build();
|
||||
output.send(message2);
|
||||
output.send(new GenericMessage<>(new byte[] { 1 }));
|
||||
output.send(new GenericMessage<>(new byte[] { 0 }));
|
||||
Message<?> receive0 = receive(input0);
|
||||
assertThat(receive0).isNotNull();
|
||||
Message<?> receive1 = receive(input1);
|
||||
assertThat(receive1).isNotNull();
|
||||
Message<?> receive2 = receive(input2);
|
||||
assertThat(receive2).isNotNull();
|
||||
assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
|
||||
((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
|
||||
input0Binding.unbind();
|
||||
input1Binding.unbind();
|
||||
input2Binding.unbind();
|
||||
outputBinding.unbind();
|
||||
}
|
||||
|
||||
@Test
|
||||
@Override
|
||||
public void testSendAndReceive() throws Exception {
|
||||
KafkaTestBinder binder = getBinder();
|
||||
DirectChannel moduleOutputChannel = new DirectChannel();
|
||||
QueueChannel moduleInputChannel = new QueueChannel();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.setHeaderMode(HeaderMode.raw);
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel, producerProperties);
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
consumerProperties.setHeaderMode(HeaderMode.raw);
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
|
||||
consumerProperties);
|
||||
Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
|
||||
// Let the consumer actually bind to the producer before sending a msg
|
||||
binderBindUnbindLatency();
|
||||
moduleOutputChannel.send(message);
|
||||
Message<?> inbound = receive(moduleInputChannel);
|
||||
assertThat(inbound).isNotNull();
|
||||
assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");
|
||||
producerBinding.unbind();
|
||||
consumerBinding.unbind();
|
||||
}
|
||||
|
||||
// Ignored, since raw mode does not support headers
|
||||
@Test
|
||||
@Override
|
||||
@Ignore
|
||||
public void testSendAndReceiveNoOriginalContentType() throws Exception {
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSendAndReceiveWithExplicitConsumerGroup() {
|
||||
KafkaTestBinder binder = getBinder();
|
||||
DirectChannel moduleOutputChannel = new DirectChannel();
|
||||
// Test pub/sub by emulating how StreamPlugin handles taps
|
||||
QueueChannel module1InputChannel = new QueueChannel();
|
||||
QueueChannel module2InputChannel = new QueueChannel();
|
||||
QueueChannel module3InputChannel = new QueueChannel();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.setHeaderMode(HeaderMode.raw);
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer("baz.0", moduleOutputChannel, producerProperties);
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
consumerProperties.setHeaderMode(HeaderMode.raw);
|
||||
Binding<MessageChannel> input1Binding = binder.bindConsumer("baz.0", "test", module1InputChannel,
|
||||
consumerProperties);
|
||||
// A new module is using the tap as an input channel
|
||||
String fooTapName = "baz.0";
|
||||
Binding<MessageChannel> input2Binding = binder.bindConsumer(fooTapName, "tap1", module2InputChannel,
|
||||
consumerProperties);
|
||||
// Another new module is using tap as an input channel
|
||||
String barTapName = "baz.0";
|
||||
Binding<MessageChannel> input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel,
|
||||
consumerProperties);
|
||||
|
||||
Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
|
||||
boolean success = false;
|
||||
boolean retried = false;
|
||||
while (!success) {
|
||||
moduleOutputChannel.send(message);
|
||||
Message<?> inbound = receive(module1InputChannel);
|
||||
assertThat(inbound).isNotNull();
|
||||
assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");
|
||||
|
||||
Message<?> tapped1 = receive(module2InputChannel);
|
||||
Message<?> tapped2 = receive(module3InputChannel);
|
||||
if (tapped1 == null || tapped2 == null) {
|
||||
// listener may not have started
|
||||
assertThat(retried).isFalse().withFailMessage("Failed to receive tap after retry");
|
||||
retried = true;
|
||||
continue;
|
||||
}
|
||||
success = true;
|
||||
assertThat(new String((byte[]) tapped1.getPayload())).isEqualTo("foo");
|
||||
assertThat(new String((byte[]) tapped2.getPayload())).isEqualTo("foo");
|
||||
}
|
||||
// delete one tap stream is deleted
|
||||
input3Binding.unbind();
|
||||
Message<?> message2 = MessageBuilder.withPayload("bar".getBytes()).build();
|
||||
moduleOutputChannel.send(message2);
|
||||
|
||||
// other tap still receives messages
|
||||
Message<?> tapped = receive(module2InputChannel);
|
||||
assertThat(tapped).isNotNull();
|
||||
|
||||
// removed tap does not
|
||||
assertThat(receive(module3InputChannel)).isNull();
|
||||
|
||||
// re-subscribed tap does receive the message
|
||||
input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel, createConsumerProperties());
|
||||
assertThat(receive(module3InputChannel)).isNotNull();
|
||||
|
||||
// clean up
|
||||
input1Binding.unbind();
|
||||
input2Binding.unbind();
|
||||
input3Binding.unbind();
|
||||
producerBinding.unbind();
|
||||
assertThat(extractEndpoint(input1Binding).isRunning()).isFalse();
|
||||
assertThat(extractEndpoint(input2Binding).isRunning()).isFalse();
|
||||
assertThat(extractEndpoint(input3Binding).isRunning()).isFalse();
|
||||
assertThat(extractEndpoint(producerBinding).isRunning()).isFalse();
|
||||
}
|
||||
}
|
||||
36
spring-cloud-stream-binder-kafka-common/pom.xml
Normal file
36
spring-cloud-stream-binder-kafka-common/pom.xml
Normal file
@@ -0,0 +1,36 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<artifactId>spring-cloud-stream-binder-kafka-common</artifactId>
|
||||
<packaging>jar</packaging>
|
||||
<name>spring-cloud-stream-binder-kafka-common</name>
|
||||
<description>Kafka binder common classes</description>
|
||||
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>1.1.0.BUILD-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-configuration-processor</artifactId>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-autoconfigure</artifactId>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-codec</artifactId>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</project>
|
||||
@@ -0,0 +1,255 @@
|
||||
/*
|
||||
* Copyright 2015-2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.config;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* @author David Turanski
|
||||
* @author Ilayaperumal Gopinathan
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
|
||||
public class KafkaBinderConfigurationProperties {
|
||||
|
||||
private String[] zkNodes = new String[] { "localhost" };
|
||||
|
||||
private String defaultZkPort = "2181";
|
||||
|
||||
private String[] brokers = new String[] { "localhost" };
|
||||
|
||||
private String defaultBrokerPort = "9092";
|
||||
|
||||
private String[] headers = new String[] {};
|
||||
|
||||
private int offsetUpdateTimeWindow = 10000;
|
||||
|
||||
private int offsetUpdateCount;
|
||||
|
||||
private int offsetUpdateShutdownTimeout = 2000;
|
||||
|
||||
private int maxWait = 100;
|
||||
|
||||
private boolean autoCreateTopics = true;
|
||||
|
||||
private boolean autoAddPartitions;
|
||||
|
||||
private int socketBufferSize = 2097152;
|
||||
|
||||
/**
|
||||
* ZK session timeout in milliseconds.
|
||||
*/
|
||||
private int zkSessionTimeout = 10000;
|
||||
|
||||
/**
|
||||
* ZK Connection timeout in milliseconds.
|
||||
*/
|
||||
private int zkConnectionTimeout = 10000;
|
||||
|
||||
private int requiredAcks = 1;
|
||||
|
||||
private int replicationFactor = 1;
|
||||
|
||||
private int fetchSize = 1024 * 1024;
|
||||
|
||||
private int minPartitionCount = 1;
|
||||
|
||||
private int queueSize = 8192;
|
||||
|
||||
private String consumerGroup;
|
||||
|
||||
public String getConsumerGroup() {
|
||||
return consumerGroup;
|
||||
}
|
||||
|
||||
public String getZkConnectionString() {
|
||||
return toConnectionString(this.zkNodes, this.defaultZkPort);
|
||||
}
|
||||
|
||||
public String getKafkaConnectionString() {
|
||||
return toConnectionString(this.brokers, this.defaultBrokerPort);
|
||||
}
|
||||
|
||||
public String[] getHeaders() {
|
||||
return headers;
|
||||
}
|
||||
|
||||
public int getOffsetUpdateTimeWindow() {
|
||||
return this.offsetUpdateTimeWindow;
|
||||
}
|
||||
|
||||
public int getOffsetUpdateCount() {
|
||||
return this.offsetUpdateCount;
|
||||
}
|
||||
|
||||
public int getOffsetUpdateShutdownTimeout() {
|
||||
return this.offsetUpdateShutdownTimeout;
|
||||
}
|
||||
|
||||
public String[] getZkNodes() {
|
||||
return zkNodes;
|
||||
}
|
||||
|
||||
public void setZkNodes(String... zkNodes) {
|
||||
this.zkNodes = zkNodes;
|
||||
}
|
||||
|
||||
public void setDefaultZkPort(String defaultZkPort) {
|
||||
this.defaultZkPort = defaultZkPort;
|
||||
}
|
||||
|
||||
public String[] getBrokers() {
|
||||
return brokers;
|
||||
}
|
||||
|
||||
public void setBrokers(String... brokers) {
|
||||
this.brokers = brokers;
|
||||
}
|
||||
|
||||
public void setDefaultBrokerPort(String defaultBrokerPort) {
|
||||
this.defaultBrokerPort = defaultBrokerPort;
|
||||
}
|
||||
|
||||
public void setHeaders(String... headers) {
|
||||
this.headers = headers;
|
||||
}
|
||||
|
||||
public void setOffsetUpdateTimeWindow(int offsetUpdateTimeWindow) {
|
||||
this.offsetUpdateTimeWindow = offsetUpdateTimeWindow;
|
||||
}
|
||||
|
||||
public void setOffsetUpdateCount(int offsetUpdateCount) {
|
||||
this.offsetUpdateCount = offsetUpdateCount;
|
||||
}
|
||||
|
||||
public void setOffsetUpdateShutdownTimeout(int offsetUpdateShutdownTimeout) {
|
||||
this.offsetUpdateShutdownTimeout = offsetUpdateShutdownTimeout;
|
||||
}
|
||||
|
||||
public int getZkSessionTimeout() {
|
||||
return this.zkSessionTimeout;
|
||||
}
|
||||
|
||||
public void setZkSessionTimeout(int zkSessionTimeout) {
|
||||
this.zkSessionTimeout = zkSessionTimeout;
|
||||
}
|
||||
|
||||
public int getZkConnectionTimeout() {
|
||||
return this.zkConnectionTimeout;
|
||||
}
|
||||
|
||||
public void setZkConnectionTimeout(int zkConnectionTimeout) {
|
||||
this.zkConnectionTimeout = zkConnectionTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts an array of host values to a comma-separated String.
|
||||
*
|
||||
* It will append the default port value, if not already specified.
|
||||
*/
|
||||
private String toConnectionString(String[] hosts, String defaultPort) {
|
||||
String[] fullyFormattedHosts = new String[hosts.length];
|
||||
for (int i = 0; i < hosts.length; i++) {
|
||||
if (hosts[i].contains(":") || StringUtils.isEmpty(defaultPort)) {
|
||||
fullyFormattedHosts[i] = hosts[i];
|
||||
}
|
||||
else {
|
||||
fullyFormattedHosts[i] = hosts[i] + ":" + defaultPort;
|
||||
}
|
||||
}
|
||||
return StringUtils.arrayToCommaDelimitedString(fullyFormattedHosts);
|
||||
}
|
||||
|
||||
public int getMaxWait() {
|
||||
return maxWait;
|
||||
}
|
||||
|
||||
public void setMaxWait(int maxWait) {
|
||||
this.maxWait = maxWait;
|
||||
}
|
||||
|
||||
public int getRequiredAcks() {
|
||||
return requiredAcks;
|
||||
}
|
||||
|
||||
public void setRequiredAcks(int requiredAcks) {
|
||||
this.requiredAcks = requiredAcks;
|
||||
}
|
||||
|
||||
public int getReplicationFactor() {
|
||||
return replicationFactor;
|
||||
}
|
||||
|
||||
public void setReplicationFactor(int replicationFactor) {
|
||||
this.replicationFactor = replicationFactor;
|
||||
}
|
||||
|
||||
public int getFetchSize() {
|
||||
return fetchSize;
|
||||
}
|
||||
|
||||
public void setFetchSize(int fetchSize) {
|
||||
this.fetchSize = fetchSize;
|
||||
}
|
||||
|
||||
public int getMinPartitionCount() {
|
||||
return minPartitionCount;
|
||||
}
|
||||
|
||||
public void setMinPartitionCount(int minPartitionCount) {
|
||||
this.minPartitionCount = minPartitionCount;
|
||||
}
|
||||
|
||||
public int getQueueSize() {
|
||||
return queueSize;
|
||||
}
|
||||
|
||||
public void setQueueSize(int queueSize) {
|
||||
this.queueSize = queueSize;
|
||||
}
|
||||
|
||||
public boolean isAutoCreateTopics() {
|
||||
return autoCreateTopics;
|
||||
}
|
||||
|
||||
public void setAutoCreateTopics(boolean autoCreateTopics) {
|
||||
this.autoCreateTopics = autoCreateTopics;
|
||||
}
|
||||
|
||||
public boolean isAutoAddPartitions() {
|
||||
return autoAddPartitions;
|
||||
}
|
||||
|
||||
public void setAutoAddPartitions(boolean autoAddPartitions) {
|
||||
this.autoAddPartitions = autoAddPartitions;
|
||||
}
|
||||
|
||||
public int getSocketBufferSize() {
|
||||
return socketBufferSize;
|
||||
}
|
||||
|
||||
public void setSocketBufferSize(int socketBufferSize) {
|
||||
this.socketBufferSize = socketBufferSize;
|
||||
}
|
||||
|
||||
public void setConsumerGroup(String consumerGroup) {
|
||||
this.consumerGroup = consumerGroup;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2016-2018 the original author or authors.
|
||||
* Copyright 2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -14,24 +14,19 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;
|
||||
package org.springframework.cloud.stream.binder.kafka.config;
|
||||
|
||||
/**
|
||||
* Container object for Kafka specific extended producer and consumer binding properties.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Oleg Zhurakousky
|
||||
*/
|
||||
public class KafkaBindingProperties implements BinderSpecificPropertiesProvider {
|
||||
public class KafkaBindingProperties {
|
||||
|
||||
private KafkaConsumerProperties consumer = new KafkaConsumerProperties();
|
||||
|
||||
private KafkaProducerProperties producer = new KafkaProducerProperties();
|
||||
|
||||
public KafkaConsumerProperties getConsumer() {
|
||||
return this.consumer;
|
||||
return consumer;
|
||||
}
|
||||
|
||||
public void setConsumer(KafkaConsumerProperties consumer) {
|
||||
@@ -39,11 +34,10 @@ public class KafkaBindingProperties implements BinderSpecificPropertiesProvider
|
||||
}
|
||||
|
||||
public KafkaProducerProperties getProducer() {
|
||||
return this.producer;
|
||||
return producer;
|
||||
}
|
||||
|
||||
public void setProducer(KafkaProducerProperties producer) {
|
||||
this.producer = producer;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,97 @@
|
||||
/*
|
||||
* Copyright 2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.config;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
*/
|
||||
public class KafkaConsumerProperties {
|
||||
|
||||
private boolean autoCommitOffset = true;
|
||||
|
||||
private Boolean autoCommitOnError;
|
||||
|
||||
private boolean resetOffsets;
|
||||
|
||||
private StartOffset startOffset;
|
||||
|
||||
private boolean enableDlq;
|
||||
|
||||
private int recoveryInterval = 5000;
|
||||
|
||||
public boolean isAutoCommitOffset() {
|
||||
return autoCommitOffset;
|
||||
}
|
||||
|
||||
public void setAutoCommitOffset(boolean autoCommitOffset) {
|
||||
this.autoCommitOffset = autoCommitOffset;
|
||||
}
|
||||
|
||||
public boolean isResetOffsets() {
|
||||
return resetOffsets;
|
||||
}
|
||||
|
||||
public void setResetOffsets(boolean resetOffsets) {
|
||||
this.resetOffsets = resetOffsets;
|
||||
}
|
||||
|
||||
public StartOffset getStartOffset() {
|
||||
return startOffset;
|
||||
}
|
||||
|
||||
public void setStartOffset(StartOffset startOffset) {
|
||||
this.startOffset = startOffset;
|
||||
}
|
||||
|
||||
public boolean isEnableDlq() {
|
||||
return enableDlq;
|
||||
}
|
||||
|
||||
public void setEnableDlq(boolean enableDlq) {
|
||||
this.enableDlq = enableDlq;
|
||||
}
|
||||
|
||||
public Boolean getAutoCommitOnError() {
|
||||
return autoCommitOnError;
|
||||
}
|
||||
|
||||
public void setAutoCommitOnError(Boolean autoCommitOnError) {
|
||||
this.autoCommitOnError = autoCommitOnError;
|
||||
}
|
||||
|
||||
public int getRecoveryInterval() {
|
||||
return recoveryInterval;
|
||||
}
|
||||
|
||||
public void setRecoveryInterval(int recoveryInterval) {
|
||||
this.recoveryInterval = recoveryInterval;
|
||||
}
|
||||
|
||||
public enum StartOffset {
|
||||
earliest(-2L), latest(-1L);
|
||||
|
||||
private final long referencePoint;
|
||||
|
||||
StartOffset(long referencePoint) {
|
||||
this.referencePoint = referencePoint;
|
||||
}
|
||||
|
||||
public long getReferencePoint() {
|
||||
return referencePoint;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright 2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.config;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedBindingProperties;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
*/
|
||||
@ConfigurationProperties("spring.cloud.stream.kafka")
|
||||
public class KafkaExtendedBindingProperties implements ExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties> {
|
||||
|
||||
private Map<String, KafkaBindingProperties> bindings = new HashMap<>();
|
||||
|
||||
public Map<String, KafkaBindingProperties> getBindings() {
|
||||
return bindings;
|
||||
}
|
||||
|
||||
public void setBindings(Map<String, KafkaBindingProperties> bindings) {
|
||||
this.bindings = bindings;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
|
||||
if (bindings.containsKey(channelName) && bindings.get(channelName).getConsumer() != null) {
|
||||
return bindings.get(channelName).getConsumer();
|
||||
}
|
||||
else {
|
||||
return new KafkaConsumerProperties();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaProducerProperties getExtendedProducerProperties(String channelName) {
|
||||
if (bindings.containsKey(channelName) && bindings.get(channelName).getProducer() != null) {
|
||||
return bindings.get(channelName).getProducer();
|
||||
}
|
||||
else {
|
||||
return new KafkaProducerProperties();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
* Copyright 2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.config;
|
||||
|
||||
import javax.validation.constraints.NotNull;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
*/
|
||||
public class KafkaProducerProperties {
|
||||
|
||||
private int bufferSize = 16384;
|
||||
|
||||
private CompressionType compressionType = CompressionType.none;
|
||||
|
||||
private boolean sync;
|
||||
|
||||
private int batchTimeout;
|
||||
|
||||
public int getBufferSize() {
|
||||
return bufferSize;
|
||||
}
|
||||
|
||||
public void setBufferSize(int bufferSize) {
|
||||
this.bufferSize = bufferSize;
|
||||
}
|
||||
|
||||
@NotNull
|
||||
public CompressionType getCompressionType() {
|
||||
return compressionType;
|
||||
}
|
||||
|
||||
public void setCompressionType(CompressionType compressionType) {
|
||||
this.compressionType = compressionType;
|
||||
}
|
||||
|
||||
public boolean isSync() {
|
||||
return sync;
|
||||
}
|
||||
|
||||
public void setSync(boolean sync) {
|
||||
this.sync = sync;
|
||||
}
|
||||
|
||||
public int getBatchTimeout() {
|
||||
return batchTimeout;
|
||||
}
|
||||
|
||||
public void setBatchTimeout(int batchTimeout) {
|
||||
this.batchTimeout = batchTimeout;
|
||||
}
|
||||
|
||||
public enum CompressionType {
|
||||
none,
|
||||
gzip,
|
||||
snappy
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,20 @@
|
||||
/*
|
||||
* Copyright 2015 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* This package contains an implementation of the {@link org.springframework.cloud.stream.binder.Binder} for Kafka.
|
||||
*/
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
@@ -1,5 +0,0 @@
|
||||
eclipse.preferences.version=1
|
||||
org.eclipse.jdt.ui.ignorelowercasenames=true
|
||||
org.eclipse.jdt.ui.importorder=java;javax;com;org;org.springframework;ch.qos;\#;
|
||||
org.eclipse.jdt.ui.ondemandthreshold=99
|
||||
org.eclipse.jdt.ui.staticondemandthreshold=99
|
||||
@@ -1,49 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>3.0.0.RC2</version>
|
||||
</parent>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
|
||||
<description>Spring Cloud Stream Kafka Binder Core</description>
|
||||
<url>https://projects.spring.io/spring-cloud</url>
|
||||
<organization>
|
||||
<name>Pivotal Software, Inc.</name>
|
||||
<url>https://www.spring.io</url>
|
||||
</organization>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.integration</groupId>
|
||||
<artifactId>spring-integration-kafka</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-configuration-processor</artifactId>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
</project>
|
||||
@@ -1,69 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.security.auth.login.AppConfigurationEntry;
|
||||
|
||||
import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
/**
|
||||
* Contains properties for setting up an {@link AppConfigurationEntry} that can be used
|
||||
* for the Kafka or Zookeeper client.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class JaasLoginModuleConfiguration {
|
||||
|
||||
private String loginModule = "com.sun.security.auth.module.Krb5LoginModule";
|
||||
|
||||
private KafkaJaasLoginModuleInitializer.ControlFlag controlFlag = KafkaJaasLoginModuleInitializer.ControlFlag.REQUIRED;
|
||||
|
||||
private Map<String, String> options = new HashMap<>();
|
||||
|
||||
public String getLoginModule() {
|
||||
return this.loginModule;
|
||||
}
|
||||
|
||||
public void setLoginModule(String loginModule) {
|
||||
Assert.notNull(loginModule, "cannot be null");
|
||||
this.loginModule = loginModule;
|
||||
}
|
||||
|
||||
public KafkaJaasLoginModuleInitializer.ControlFlag getControlFlag() {
|
||||
return this.controlFlag;
|
||||
}
|
||||
|
||||
public void setControlFlag(String controlFlag) {
|
||||
Assert.notNull(controlFlag, "cannot be null");
|
||||
this.controlFlag = KafkaJaasLoginModuleInitializer.ControlFlag
|
||||
.valueOf(controlFlag.toUpperCase());
|
||||
}
|
||||
|
||||
public Map<String, String> getOptions() {
|
||||
return this.options;
|
||||
}
|
||||
|
||||
public void setOptions(Map<String, String> options) {
|
||||
this.options = options;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,544 +0,0 @@
|
||||
/*
|
||||
* Copyright 2015-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.validation.constraints.AssertTrue;
|
||||
import javax.validation.constraints.Min;
|
||||
import javax.validation.constraints.NotNull;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.HeaderMode;
|
||||
import org.springframework.cloud.stream.binder.ProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties.CompressionType;
|
||||
import org.springframework.expression.Expression;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Configuration properties for the Kafka binder. The properties in this class are
|
||||
* prefixed with <b>spring.cloud.stream.kafka.binder</b>.
|
||||
*
|
||||
* @author David Turanski
|
||||
* @author Ilayaperumal Gopinathan
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
* @author Rafal Zukowski
|
||||
* @author Aldo Sinanaj
|
||||
*/
|
||||
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
|
||||
public class KafkaBinderConfigurationProperties {
|
||||
|
||||
private static final String DEFAULT_KAFKA_CONNECTION_STRING = "localhost:9092";
|
||||
|
||||
private final Log logger = LogFactory.getLog(getClass());
|
||||
|
||||
private final Transaction transaction = new Transaction();
|
||||
|
||||
private final KafkaProperties kafkaProperties;
|
||||
|
||||
/**
|
||||
* Arbitrary kafka properties that apply to both producers and consumers.
|
||||
*/
|
||||
private Map<String, String> configuration = new HashMap<>();
|
||||
|
||||
/**
|
||||
* Arbitrary kafka consumer properties.
|
||||
*/
|
||||
private Map<String, String> consumerProperties = new HashMap<>();
|
||||
|
||||
/**
|
||||
* Arbitrary kafka producer properties.
|
||||
*/
|
||||
private Map<String, String> producerProperties = new HashMap<>();
|
||||
|
||||
private String[] brokers = new String[] { "localhost" };
|
||||
|
||||
private String defaultBrokerPort = "9092";
|
||||
|
||||
private String[] headers = new String[] {};
|
||||
|
||||
private boolean autoCreateTopics = true;
|
||||
|
||||
private boolean autoAddPartitions;
|
||||
|
||||
private String requiredAcks = "1";
|
||||
|
||||
private short replicationFactor = 1;
|
||||
|
||||
private int minPartitionCount = 1;
|
||||
|
||||
/**
|
||||
* Time to wait to get partition information in seconds; default 60.
|
||||
*/
|
||||
private int healthTimeout = 60;
|
||||
|
||||
private JaasLoginModuleConfiguration jaas;
|
||||
|
||||
/**
|
||||
* The bean name of a custom header mapper to use instead of a
|
||||
* {@link org.springframework.kafka.support.DefaultKafkaHeaderMapper}.
|
||||
*/
|
||||
private String headerMapperBeanName;
|
||||
|
||||
public KafkaBinderConfigurationProperties(KafkaProperties kafkaProperties) {
|
||||
Assert.notNull(kafkaProperties, "'kafkaProperties' cannot be null");
|
||||
this.kafkaProperties = kafkaProperties;
|
||||
}
|
||||
|
||||
public KafkaProperties getKafkaProperties() {
|
||||
return this.kafkaProperties;
|
||||
}
|
||||
|
||||
public Transaction getTransaction() {
|
||||
return this.transaction;
|
||||
}
|
||||
|
||||
public String getKafkaConnectionString() {
|
||||
return toConnectionString(this.brokers, this.defaultBrokerPort);
|
||||
}
|
||||
|
||||
public String getDefaultKafkaConnectionString() {
|
||||
return DEFAULT_KAFKA_CONNECTION_STRING;
|
||||
}
|
||||
|
||||
public String[] getHeaders() {
|
||||
return this.headers;
|
||||
}
|
||||
|
||||
public String[] getBrokers() {
|
||||
return this.brokers;
|
||||
}
|
||||
|
||||
public void setBrokers(String... brokers) {
|
||||
this.brokers = brokers;
|
||||
}
|
||||
|
||||
public void setDefaultBrokerPort(String defaultBrokerPort) {
|
||||
this.defaultBrokerPort = defaultBrokerPort;
|
||||
}
|
||||
|
||||
public void setHeaders(String... headers) {
|
||||
this.headers = headers;
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts an array of host values to a comma-separated String. It will append the
|
||||
* default port value, if not already specified.
|
||||
* @param hosts host string
|
||||
* @param defaultPort port
|
||||
* @return formatted connection string
|
||||
*/
|
||||
private String toConnectionString(String[] hosts, String defaultPort) {
|
||||
String[] fullyFormattedHosts = new String[hosts.length];
|
||||
for (int i = 0; i < hosts.length; i++) {
|
||||
if (hosts[i].contains(":") || StringUtils.isEmpty(defaultPort)) {
|
||||
fullyFormattedHosts[i] = hosts[i];
|
||||
}
|
||||
else {
|
||||
fullyFormattedHosts[i] = hosts[i] + ":" + defaultPort;
|
||||
}
|
||||
}
|
||||
return StringUtils.arrayToCommaDelimitedString(fullyFormattedHosts);
|
||||
}
|
||||
|
||||
public String getRequiredAcks() {
|
||||
return this.requiredAcks;
|
||||
}
|
||||
|
||||
public void setRequiredAcks(String requiredAcks) {
|
||||
this.requiredAcks = requiredAcks;
|
||||
}
|
||||
|
||||
public short getReplicationFactor() {
|
||||
return this.replicationFactor;
|
||||
}
|
||||
|
||||
public void setReplicationFactor(short replicationFactor) {
|
||||
this.replicationFactor = replicationFactor;
|
||||
}
|
||||
|
||||
public int getMinPartitionCount() {
|
||||
return this.minPartitionCount;
|
||||
}
|
||||
|
||||
public void setMinPartitionCount(int minPartitionCount) {
|
||||
this.minPartitionCount = minPartitionCount;
|
||||
}
|
||||
|
||||
public int getHealthTimeout() {
|
||||
return this.healthTimeout;
|
||||
}
|
||||
|
||||
public void setHealthTimeout(int healthTimeout) {
|
||||
this.healthTimeout = healthTimeout;
|
||||
}
|
||||
|
||||
public boolean isAutoCreateTopics() {
|
||||
return this.autoCreateTopics;
|
||||
}
|
||||
|
||||
public void setAutoCreateTopics(boolean autoCreateTopics) {
|
||||
this.autoCreateTopics = autoCreateTopics;
|
||||
}
|
||||
|
||||
public boolean isAutoAddPartitions() {
|
||||
return this.autoAddPartitions;
|
||||
}
|
||||
|
||||
public void setAutoAddPartitions(boolean autoAddPartitions) {
|
||||
this.autoAddPartitions = autoAddPartitions;
|
||||
}
|
||||
|
||||
public Map<String, String> getConfiguration() {
|
||||
return this.configuration;
|
||||
}
|
||||
|
||||
public void setConfiguration(Map<String, String> configuration) {
|
||||
this.configuration = configuration;
|
||||
}
|
||||
|
||||
public Map<String, String> getConsumerProperties() {
|
||||
return this.consumerProperties;
|
||||
}
|
||||
|
||||
public void setConsumerProperties(Map<String, String> consumerProperties) {
|
||||
Assert.notNull(consumerProperties, "'consumerProperties' cannot be null");
|
||||
this.consumerProperties = consumerProperties;
|
||||
}
|
||||
|
||||
public Map<String, String> getProducerProperties() {
|
||||
return this.producerProperties;
|
||||
}
|
||||
|
||||
public void setProducerProperties(Map<String, String> producerProperties) {
|
||||
Assert.notNull(producerProperties, "'producerProperties' cannot be null");
|
||||
this.producerProperties = producerProperties;
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge boot consumer properties, general properties from
|
||||
* {@link #setConfiguration(Map)} that apply to consumers, properties from
|
||||
* {@link #setConsumerProperties(Map)}, in that order.
|
||||
* @return the merged properties.
|
||||
*/
|
||||
public Map<String, Object> mergedConsumerConfiguration() {
|
||||
Map<String, Object> consumerConfiguration = new HashMap<>();
|
||||
consumerConfiguration.putAll(this.kafkaProperties.buildConsumerProperties());
|
||||
// Copy configured binder properties that apply to consumers
|
||||
for (Map.Entry<String, String> configurationEntry : this.configuration
|
||||
.entrySet()) {
|
||||
if (ConsumerConfig.configNames().contains(configurationEntry.getKey())) {
|
||||
consumerConfiguration.put(configurationEntry.getKey(),
|
||||
configurationEntry.getValue());
|
||||
}
|
||||
}
|
||||
consumerConfiguration.putAll(this.consumerProperties);
|
||||
filterStreamManagedConfiguration(consumerConfiguration);
|
||||
// Override Spring Boot bootstrap server setting if left to default with the value
|
||||
// configured in the binder
|
||||
return getConfigurationWithBootstrapServer(consumerConfiguration,
|
||||
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge boot producer properties, general properties from
|
||||
* {@link #setConfiguration(Map)} that apply to producers, properties from
|
||||
* {@link #setProducerProperties(Map)}, in that order.
|
||||
* @return the merged properties.
|
||||
*/
|
||||
public Map<String, Object> mergedProducerConfiguration() {
|
||||
Map<String, Object> producerConfiguration = new HashMap<>();
|
||||
producerConfiguration.putAll(this.kafkaProperties.buildProducerProperties());
|
||||
// Copy configured binder properties that apply to producers
|
||||
for (Map.Entry<String, String> configurationEntry : this.configuration
|
||||
.entrySet()) {
|
||||
if (ProducerConfig.configNames().contains(configurationEntry.getKey())) {
|
||||
producerConfiguration.put(configurationEntry.getKey(),
|
||||
configurationEntry.getValue());
|
||||
}
|
||||
}
|
||||
producerConfiguration.putAll(this.producerProperties);
|
||||
// Override Spring Boot bootstrap server setting if left to default with the value
|
||||
// configured in the binder
|
||||
return getConfigurationWithBootstrapServer(producerConfiguration,
|
||||
ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
}
|
||||
|
||||
private void filterStreamManagedConfiguration(Map<String, Object> configuration) {
|
||||
if (configuration.containsKey(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)
|
||||
&& configuration.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG).equals(true)) {
|
||||
logger.warn(constructIgnoredConfigMessage(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG) +
|
||||
ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG + "=true is not supported by the Kafka binder");
|
||||
configuration.remove(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
|
||||
}
|
||||
if (configuration.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
|
||||
logger.warn(constructIgnoredConfigMessage(ConsumerConfig.GROUP_ID_CONFIG) +
|
||||
"Use spring.cloud.stream.default.group or spring.cloud.stream.binding.<name>.group to specify " +
|
||||
"the group instead of " + ConsumerConfig.GROUP_ID_CONFIG);
|
||||
configuration.remove(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
}
|
||||
|
||||
private String constructIgnoredConfigMessage(String config) {
|
||||
return String.format("Ignoring provided value(s) for '%s'. ", config);
|
||||
}
|
||||
|
||||
private Map<String, Object> getConfigurationWithBootstrapServer(
|
||||
Map<String, Object> configuration, String bootstrapServersConfig) {
|
||||
if (ObjectUtils.isEmpty(configuration.get(bootstrapServersConfig))) {
|
||||
configuration.put(bootstrapServersConfig, getKafkaConnectionString());
|
||||
}
|
||||
else {
|
||||
Object boostrapServersConfig = configuration.get(bootstrapServersConfig);
|
||||
if (boostrapServersConfig instanceof List) {
|
||||
@SuppressWarnings("unchecked")
|
||||
List<String> bootStrapServers = (List<String>) configuration
|
||||
.get(bootstrapServersConfig);
|
||||
if (bootStrapServers.size() == 1
|
||||
&& bootStrapServers.get(0).equals("localhost:9092")) {
|
||||
configuration.put(bootstrapServersConfig, getKafkaConnectionString());
|
||||
}
|
||||
}
|
||||
}
|
||||
return Collections.unmodifiableMap(configuration);
|
||||
}
|
||||
|
||||
public JaasLoginModuleConfiguration getJaas() {
|
||||
return this.jaas;
|
||||
}
|
||||
|
||||
public void setJaas(JaasLoginModuleConfiguration jaas) {
|
||||
this.jaas = jaas;
|
||||
}
|
||||
|
||||
public String getHeaderMapperBeanName() {
|
||||
return this.headerMapperBeanName;
|
||||
}
|
||||
|
||||
public void setHeaderMapperBeanName(String headerMapperBeanName) {
|
||||
this.headerMapperBeanName = headerMapperBeanName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Domain class that models transaction capabilities in Kafka.
|
||||
*/
|
||||
public static class Transaction {
|
||||
|
||||
private final CombinedProducerProperties producer = new CombinedProducerProperties();
|
||||
|
||||
private String transactionIdPrefix;
|
||||
|
||||
public String getTransactionIdPrefix() {
|
||||
return this.transactionIdPrefix;
|
||||
}
|
||||
|
||||
public void setTransactionIdPrefix(String transactionIdPrefix) {
|
||||
this.transactionIdPrefix = transactionIdPrefix;
|
||||
}
|
||||
|
||||
public CombinedProducerProperties getProducer() {
|
||||
return this.producer;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* An combination of {@link ProducerProperties} and {@link KafkaProducerProperties} so
|
||||
* that common and kafka-specific properties can be set for the transactional
|
||||
* producer.
|
||||
*
|
||||
* @since 2.1
|
||||
*/
|
||||
    public static class CombinedProducerProperties {

        // Common (binder-agnostic) producer properties; the partitioning,
        // header-mode, native-encoding and error-channel accessors below
        // delegate to this instance.
        private final ProducerProperties producerProperties = new ProducerProperties();

        // Kafka-specific producer properties; the buffer/compression/topic
        // accessors below delegate here, and the whole instance is exposed
        // via getExtension().
        private final KafkaProducerProperties kafkaProducerProperties = new KafkaProducerProperties();

        public Expression getPartitionKeyExpression() {
            return this.producerProperties.getPartitionKeyExpression();
        }

        public void setPartitionKeyExpression(Expression partitionKeyExpression) {
            this.producerProperties.setPartitionKeyExpression(partitionKeyExpression);
        }

        public boolean isPartitioned() {
            return this.producerProperties.isPartitioned();
        }

        public Expression getPartitionSelectorExpression() {
            return this.producerProperties.getPartitionSelectorExpression();
        }

        public void setPartitionSelectorExpression(
                Expression partitionSelectorExpression) {
            this.producerProperties
                    .setPartitionSelectorExpression(partitionSelectorExpression);
        }

        // Bean Validation constraint is repeated here so it applies to this
        // delegating accessor as well.
        public @Min(value = 1, message = "Partition count should be greater than zero.") int getPartitionCount() {
            return this.producerProperties.getPartitionCount();
        }

        public void setPartitionCount(int partitionCount) {
            this.producerProperties.setPartitionCount(partitionCount);
        }

        public String[] getRequiredGroups() {
            return this.producerProperties.getRequiredGroups();
        }

        public void setRequiredGroups(String... requiredGroups) {
            this.producerProperties.setRequiredGroups(requiredGroups);
        }

        public @AssertTrue(message = "Partition key expression and partition key extractor class properties "
                + "are mutually exclusive.") boolean isValidPartitionKeyProperty() {
            return this.producerProperties.isValidPartitionKeyProperty();
        }

        public @AssertTrue(message = "Partition selector class and partition selector expression "
                + "properties are mutually exclusive.") boolean isValidPartitionSelectorProperty() {
            return this.producerProperties.isValidPartitionSelectorProperty();
        }

        public HeaderMode getHeaderMode() {
            return this.producerProperties.getHeaderMode();
        }

        public void setHeaderMode(HeaderMode headerMode) {
            this.producerProperties.setHeaderMode(headerMode);
        }

        public boolean isUseNativeEncoding() {
            return this.producerProperties.isUseNativeEncoding();
        }

        public void setUseNativeEncoding(boolean useNativeEncoding) {
            this.producerProperties.setUseNativeEncoding(useNativeEncoding);
        }

        public boolean isErrorChannelEnabled() {
            return this.producerProperties.isErrorChannelEnabled();
        }

        public void setErrorChannelEnabled(boolean errorChannelEnabled) {
            this.producerProperties.setErrorChannelEnabled(errorChannelEnabled);
        }

        public String getPartitionKeyExtractorName() {
            return this.producerProperties.getPartitionKeyExtractorName();
        }

        public void setPartitionKeyExtractorName(String partitionKeyExtractorName) {
            this.producerProperties
                    .setPartitionKeyExtractorName(partitionKeyExtractorName);
        }

        public String getPartitionSelectorName() {
            return this.producerProperties.getPartitionSelectorName();
        }

        public void setPartitionSelectorName(String partitionSelectorName) {
            this.producerProperties.setPartitionSelectorName(partitionSelectorName);
        }

        public int getBufferSize() {
            return this.kafkaProducerProperties.getBufferSize();
        }

        public void setBufferSize(int bufferSize) {
            this.kafkaProducerProperties.setBufferSize(bufferSize);
        }

        public @NotNull CompressionType getCompressionType() {
            return this.kafkaProducerProperties.getCompressionType();
        }

        public void setCompressionType(CompressionType compressionType) {
            this.kafkaProducerProperties.setCompressionType(compressionType);
        }

        public boolean isSync() {
            return this.kafkaProducerProperties.isSync();
        }

        public void setSync(boolean sync) {
            this.kafkaProducerProperties.setSync(sync);
        }

        public int getBatchTimeout() {
            return this.kafkaProducerProperties.getBatchTimeout();
        }

        public void setBatchTimeout(int batchTimeout) {
            this.kafkaProducerProperties.setBatchTimeout(batchTimeout);
        }

        public Expression getMessageKeyExpression() {
            return this.kafkaProducerProperties.getMessageKeyExpression();
        }

        public void setMessageKeyExpression(Expression messageKeyExpression) {
            this.kafkaProducerProperties.setMessageKeyExpression(messageKeyExpression);
        }

        public String[] getHeaderPatterns() {
            return this.kafkaProducerProperties.getHeaderPatterns();
        }

        public void setHeaderPatterns(String[] headerPatterns) {
            this.kafkaProducerProperties.setHeaderPatterns(headerPatterns);
        }

        public Map<String, String> getConfiguration() {
            return this.kafkaProducerProperties.getConfiguration();
        }

        public void setConfiguration(Map<String, String> configuration) {
            this.kafkaProducerProperties.setConfiguration(configuration);
        }

        public KafkaTopicProperties getTopic() {
            return this.kafkaProducerProperties.getTopic();
        }

        public void setTopic(KafkaTopicProperties topic) {
            this.kafkaProducerProperties.setTopic(topic);
        }

        // Returns the live Kafka-specific delegate (not a copy), so callers
        // can mutate it directly.
        public KafkaProducerProperties getExtension() {
            return this.kafkaProducerProperties;
        }

    }
|
||||
|
||||
}
|
||||
@@ -1,291 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Extended consumer properties for Kafka binder.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Ilayaperumal Gopinathan
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
* @author Aldo Sinanaj
|
||||
*
|
||||
* <p>
|
||||
* Thanks to Laszlo Szabo for providing the initial patch for generic property support.
|
||||
* </p>
|
||||
*/
|
||||
public class KafkaConsumerProperties {

    /**
     * Enumeration for starting consumer offset.
     */
    public enum StartOffset {

        /**
         * Starting from earliest offset.
         */
        earliest(-2L),
        /**
         * Starting from latest offset.
         */
        latest(-1L);

        // Sentinel identifying the seek position for this choice.
        private final long referencePoint;

        StartOffset(long referencePoint) {
            this.referencePoint = referencePoint;
        }

        public long getReferencePoint() {
            return this.referencePoint;
        }

    }

    /**
     * Standard headers for the message.
     */
    public enum StandardHeaders {

        /**
         * No headers.
         */
        none,
        /**
         * Message header representing ID.
         */
        id,
        /**
         * Message header representing timestamp.
         */
        timestamp,
        /**
         * Indicating both ID and timestamp headers.
         */
        both

    }

    private boolean ackEachRecord;

    // Defaults to true.
    private boolean autoRebalanceEnabled = true;

    // Defaults to true.
    private boolean autoCommitOffset = true;

    // Deliberately tri-state: null means "not configured", distinct from
    // an explicit true/false.
    private Boolean autoCommitOnError;

    private StartOffset startOffset;

    private boolean resetOffsets;

    private boolean enableDlq;

    private String dlqName;

    // Tri-state: null means "not configured".
    private Integer dlqPartitions;

    // Producer properties applied to the DLQ producer.
    private KafkaProducerProperties dlqProducerProperties = new KafkaProducerProperties();

    // Retained only for backwards compatibility; see the deprecated
    // accessors below.
    private int recoveryInterval = 5000;

    private String[] trustedPackages;

    private StandardHeaders standardHeaders = StandardHeaders.none;

    private String converterBeanName;

    // Defaults to 30 seconds.
    private long idleEventInterval = 30_000;

    private boolean destinationIsPattern;

    // Free-form Kafka client configuration (key -> value).
    private Map<String, String> configuration = new HashMap<>();

    private KafkaTopicProperties topic = new KafkaTopicProperties();

    /**
     * Timeout used for polling in pollable consumers.
     */
    private long pollTimeout = org.springframework.kafka.listener.ConsumerProperties.DEFAULT_POLL_TIMEOUT;

    public boolean isAckEachRecord() {
        return this.ackEachRecord;
    }

    public void setAckEachRecord(boolean ackEachRecord) {
        this.ackEachRecord = ackEachRecord;
    }

    public boolean isAutoCommitOffset() {
        return this.autoCommitOffset;
    }

    public void setAutoCommitOffset(boolean autoCommitOffset) {
        this.autoCommitOffset = autoCommitOffset;
    }

    public StartOffset getStartOffset() {
        return this.startOffset;
    }

    public void setStartOffset(StartOffset startOffset) {
        this.startOffset = startOffset;
    }

    public boolean isResetOffsets() {
        return this.resetOffsets;
    }

    public void setResetOffsets(boolean resetOffsets) {
        this.resetOffsets = resetOffsets;
    }

    public boolean isEnableDlq() {
        return this.enableDlq;
    }

    public void setEnableDlq(boolean enableDlq) {
        this.enableDlq = enableDlq;
    }

    public Boolean getAutoCommitOnError() {
        return this.autoCommitOnError;
    }

    public void setAutoCommitOnError(Boolean autoCommitOnError) {
        this.autoCommitOnError = autoCommitOnError;
    }

    /**
     * No longer used.
     * @return the interval.
     * @deprecated No longer used by the binder
     */
    @Deprecated
    public int getRecoveryInterval() {
        return this.recoveryInterval;
    }

    /**
     * No longer used.
     * @param recoveryInterval the interval.
     * @deprecated No longer needed by the binder
     */
    @Deprecated
    public void setRecoveryInterval(int recoveryInterval) {
        this.recoveryInterval = recoveryInterval;
    }

    public boolean isAutoRebalanceEnabled() {
        return this.autoRebalanceEnabled;
    }

    public void setAutoRebalanceEnabled(boolean autoRebalanceEnabled) {
        this.autoRebalanceEnabled = autoRebalanceEnabled;
    }

    public Map<String, String> getConfiguration() {
        return this.configuration;
    }

    public void setConfiguration(Map<String, String> configuration) {
        this.configuration = configuration;
    }

    public String getDlqName() {
        return this.dlqName;
    }

    public void setDlqName(String dlqName) {
        this.dlqName = dlqName;
    }

    public Integer getDlqPartitions() {
        return this.dlqPartitions;
    }

    public void setDlqPartitions(Integer dlqPartitions) {
        this.dlqPartitions = dlqPartitions;
    }

    public String[] getTrustedPackages() {
        return this.trustedPackages;
    }

    public void setTrustedPackages(String[] trustedPackages) {
        this.trustedPackages = trustedPackages;
    }

    public KafkaProducerProperties getDlqProducerProperties() {
        return this.dlqProducerProperties;
    }

    public void setDlqProducerProperties(KafkaProducerProperties dlqProducerProperties) {
        this.dlqProducerProperties = dlqProducerProperties;
    }

    public StandardHeaders getStandardHeaders() {
        return this.standardHeaders;
    }

    public void setStandardHeaders(StandardHeaders standardHeaders) {
        this.standardHeaders = standardHeaders;
    }

    public String getConverterBeanName() {
        return this.converterBeanName;
    }

    public void setConverterBeanName(String converterBeanName) {
        this.converterBeanName = converterBeanName;
    }

    public long getIdleEventInterval() {
        return this.idleEventInterval;
    }

    public void setIdleEventInterval(long idleEventInterval) {
        this.idleEventInterval = idleEventInterval;
    }

    public boolean isDestinationIsPattern() {
        return this.destinationIsPattern;
    }

    public void setDestinationIsPattern(boolean destinationIsPattern) {
        this.destinationIsPattern = destinationIsPattern;
    }

    public KafkaTopicProperties getTopic() {
        return this.topic;
    }

    public void setTopic(KafkaTopicProperties topic) {
        this.topic = topic;
    }

    public long getPollTimeout() {
        return this.pollTimeout;
    }

    public void setPollTimeout(long pollTimeout) {
        this.pollTimeout = pollTimeout;
    }
}
|
||||
@@ -1,55 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.AbstractExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;
|
||||
|
||||
/**
|
||||
* Kafka specific extended binding properties class that extends from
|
||||
* {@link AbstractExtendedBindingProperties}.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Gary Russell
|
||||
* @author Soby Chacko
|
||||
* @author Oleg Zhurakousky
|
||||
*/
|
||||
@ConfigurationProperties("spring.cloud.stream.kafka")
public class KafkaExtendedBindingProperties extends
        AbstractExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties, KafkaBindingProperties> {

    // Property prefix under which binder-wide default binding properties live.
    private static final String DEFAULTS_PREFIX = "spring.cloud.stream.kafka.default";

    @Override
    public String getDefaultsPrefix() {
        return DEFAULTS_PREFIX;
    }

    @Override
    public Map<String, KafkaBindingProperties> getBindings() {
        return this.doGetBindings();
    }

    @Override
    public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
        return KafkaBindingProperties.class;
    }

}
|
||||
@@ -1,180 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.validation.constraints.NotNull;
|
||||
|
||||
import org.springframework.expression.Expression;
|
||||
|
||||
/**
|
||||
* Extended producer properties for Kafka binder.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Henryk Konsek
|
||||
* @author Gary Russell
|
||||
* @author Aldo Sinanaj
|
||||
*/
|
||||
public class KafkaProducerProperties {

    // Defaults to 16384 bytes.
    private int bufferSize = 16384;

    // No compression by default.
    private CompressionType compressionType = CompressionType.none;

    private boolean sync;

    // SpEL expression (org.springframework.expression.Expression).
    private Expression sendTimeoutExpression;

    private int batchTimeout;

    // SpEL expression evaluated to derive the record key.
    private Expression messageKeyExpression;

    private String[] headerPatterns;

    // Free-form Kafka producer configuration (key -> value).
    private Map<String, String> configuration = new HashMap<>();

    private KafkaTopicProperties topic = new KafkaTopicProperties();

    private boolean useTopicHeader;

    private String recordMetadataChannel;

    public int getBufferSize() {
        return this.bufferSize;
    }

    public void setBufferSize(int bufferSize) {
        this.bufferSize = bufferSize;
    }

    @NotNull
    public CompressionType getCompressionType() {
        return this.compressionType;
    }

    public void setCompressionType(CompressionType compressionType) {
        this.compressionType = compressionType;
    }

    public boolean isSync() {
        return this.sync;
    }

    public void setSync(boolean sync) {
        this.sync = sync;
    }

    public Expression getSendTimeoutExpression() {
        return this.sendTimeoutExpression;
    }

    public void setSendTimeoutExpression(Expression sendTimeoutExpression) {
        this.sendTimeoutExpression = sendTimeoutExpression;
    }

    public int getBatchTimeout() {
        return this.batchTimeout;
    }

    public void setBatchTimeout(int batchTimeout) {
        this.batchTimeout = batchTimeout;
    }

    public Expression getMessageKeyExpression() {
        return this.messageKeyExpression;
    }

    public void setMessageKeyExpression(Expression messageKeyExpression) {
        this.messageKeyExpression = messageKeyExpression;
    }

    public String[] getHeaderPatterns() {
        return this.headerPatterns;
    }

    public void setHeaderPatterns(String[] headerPatterns) {
        this.headerPatterns = headerPatterns;
    }

    public Map<String, String> getConfiguration() {
        return this.configuration;
    }

    public void setConfiguration(Map<String, String> configuration) {
        this.configuration = configuration;
    }

    public KafkaTopicProperties getTopic() {
        return this.topic;
    }

    public void setTopic(KafkaTopicProperties topic) {
        this.topic = topic;
    }

    public boolean isUseTopicHeader() {
        return this.useTopicHeader;
    }

    public void setUseTopicHeader(boolean useTopicHeader) {
        this.useTopicHeader = useTopicHeader;
    }

    public String getRecordMetadataChannel() {
        return this.recordMetadataChannel;
    }

    public void setRecordMetadataChannel(String recordMetadataChannel) {
        this.recordMetadataChannel = recordMetadataChannel;
    }

    /**
     * Enumeration for compression types.
     */
    public enum CompressionType {

        /**
         * No compression.
         */
        none,

        /**
         * gzip based compression.
         */
        gzip,

        /**
         * snappy based compression.
         */
        snappy,

        /**
         * lz4 compression.
         */
        lz4,

        // /** // TODO: uncomment and fix docs when kafka-clients 2.1.0 or newer is the
        // default
        // * zstd compression
        // */
        // zstd

    }

}
|
||||
@@ -1,62 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Properties for configuring topics.
|
||||
*
|
||||
* @author Aldo Sinanaj
|
||||
* @since 2.2
|
||||
*
|
||||
*/
|
||||
public class KafkaTopicProperties {

    // Replication factor for the topic; null when not explicitly configured.
    private Short replicationFactor;

    // NOTE(review): presumably partition index -> replica broker ids; empty
    // when not configured.
    private Map<Integer, List<Integer>> replicasAssignments = new HashMap<>();

    // Free-form topic-level configuration properties (key -> value).
    private Map<String, String> properties = new HashMap<>();

    public Short getReplicationFactor() {
        // Qualified with 'this.' for consistency with the rest of the file.
        return this.replicationFactor;
    }

    public void setReplicationFactor(Short replicationFactor) {
        this.replicationFactor = replicationFactor;
    }

    public Map<Integer, List<Integer>> getReplicasAssignments() {
        return this.replicasAssignments;
    }

    public void setReplicasAssignments(Map<Integer, List<Integer>> replicasAssignments) {
        this.replicasAssignments = replicasAssignments;
    }

    public Map<String, String> getProperties() {
        return this.properties;
    }

    public void setProperties(Map<String, String> properties) {
        this.properties = properties;
    }

}
|
||||
@@ -1,584 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.provisioning;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
import org.apache.kafka.clients.admin.AdminClient;
|
||||
import org.apache.kafka.clients.admin.AdminClientConfig;
|
||||
import org.apache.kafka.clients.admin.CreatePartitionsResult;
|
||||
import org.apache.kafka.clients.admin.CreateTopicsResult;
|
||||
import org.apache.kafka.clients.admin.DescribeTopicsResult;
|
||||
import org.apache.kafka.clients.admin.ListTopicsResult;
|
||||
import org.apache.kafka.clients.admin.NewPartitions;
|
||||
import org.apache.kafka.clients.admin.NewTopic;
|
||||
import org.apache.kafka.clients.admin.TopicDescription;
|
||||
import org.apache.kafka.common.KafkaFuture;
|
||||
import org.apache.kafka.common.PartitionInfo;
|
||||
import org.apache.kafka.common.errors.TopicExistsException;
|
||||
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
|
||||
|
||||
import org.springframework.beans.factory.InitializingBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.BinderException;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaTopicProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.utils.KafkaTopicUtils;
|
||||
import org.springframework.cloud.stream.provisioning.ConsumerDestination;
|
||||
import org.springframework.cloud.stream.provisioning.ProducerDestination;
|
||||
import org.springframework.cloud.stream.provisioning.ProvisioningException;
|
||||
import org.springframework.cloud.stream.provisioning.ProvisioningProvider;
|
||||
import org.springframework.retry.RetryOperations;
|
||||
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
|
||||
import org.springframework.retry.policy.SimpleRetryPolicy;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Kafka implementation for {@link ProvisioningProvider}.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
* @author Ilayaperumal Gopinathan
|
||||
* @author Simon Flandergan
|
||||
* @author Oleg Zhurakousky
|
||||
* @author Aldo Sinanaj
|
||||
*/
|
||||
public class KafkaTopicProvisioner implements
|
||||
// @checkstyle:off
|
||||
ProvisioningProvider<ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>>,
|
||||
// @checkstyle:on
|
||||
InitializingBean {
|
||||
|
||||
private static final Log logger = LogFactory.getLog(KafkaTopicProvisioner.class);
|
||||
|
||||
private static final int DEFAULT_OPERATION_TIMEOUT = 30;
|
||||
|
||||
private final KafkaBinderConfigurationProperties configurationProperties;
|
||||
|
||||
private final int operationTimeout = DEFAULT_OPERATION_TIMEOUT;
|
||||
|
||||
private final Map<String, Object> adminClientProperties;
|
||||
|
||||
private RetryOperations metadataRetryOperations;
|
||||
|
||||
    /**
     * Create a provisioner, seeding the admin-client configuration from the
     * Boot Kafka properties and then normalizing it against the binder
     * configuration.
     * @param kafkaBinderConfigurationProperties the binder configuration.
     * @param kafkaProperties the Boot Kafka properties; must not be null.
     */
    public KafkaTopicProvisioner(
            KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
            KafkaProperties kafkaProperties) {
        Assert.isTrue(kafkaProperties != null, "KafkaProperties cannot be null");
        this.adminClientProperties = kafkaProperties.buildAdminProperties();
        this.configurationProperties = kafkaBinderConfigurationProperties;
        // NOTE(review): the method name carries a typo ("normalalize"), but it
        // is public static API so renaming would break external callers.
        normalalizeBootPropsWithBinder(this.adminClientProperties, kafkaProperties,
                kafkaBinderConfigurationProperties);
    }
|
||||
|
||||
    /**
     * Mutator for metadata retry operations. If set before
     * {@code afterPropertiesSet()} runs, the default retry template is not
     * installed.
     * @param metadataRetryOperations the retry configuration
     */
    public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
        this.metadataRetryOperations = metadataRetryOperations;
    }
|
||||
|
||||
    @Override
    public void afterPropertiesSet() throws Exception {
        // Install a default retry configuration only when none was injected
        // via setMetadataRetryOperations().
        if (this.metadataRetryOperations == null) {
            RetryTemplate retryTemplate = new RetryTemplate();

            // Up to 10 attempts per metadata operation.
            SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
            simpleRetryPolicy.setMaxAttempts(10);
            retryTemplate.setRetryPolicy(simpleRetryPolicy);

            // Exponential back-off: 100ms initial, doubling, capped at 1s.
            ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
            backOffPolicy.setInitialInterval(100);
            backOffPolicy.setMultiplier(2);
            backOffPolicy.setMaxInterval(1000);
            retryTemplate.setBackOffPolicy(backOffPolicy);
            this.metadataRetryOperations = retryTemplate;
        }
    }
|
||||
|
||||
    @Override
    public ProducerDestination provisionProducerDestination(final String name,
            ExtendedProducerProperties<KafkaProducerProperties> properties) {

        if (this.logger.isInfoEnabled()) {
            this.logger.info("Using kafka topic for outbound: " + name);
        }
        KafkaTopicUtils.validateTopicName(name);
        // AdminClient is closed by try-with-resources.
        try (AdminClient adminClient = AdminClient.create(this.adminClientProperties)) {
            createTopic(adminClient, name, properties.getPartitionCount(), false,
                    properties.getExtension().getTopic());
            // Partition count stays 0 unless auto topic creation is enabled
            // and the broker is queried below.
            int partitions = 0;
            if (this.configurationProperties.isAutoCreateTopics()) {
                DescribeTopicsResult describeTopicsResult = adminClient
                        .describeTopics(Collections.singletonList(name));
                KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult
                        .all();

                Map<String, TopicDescription> topicDescriptions = null;
                try {
                    // Bounded wait (operationTimeout seconds) for the topic metadata.
                    topicDescriptions = all.get(this.operationTimeout, TimeUnit.SECONDS);
                }
                catch (Exception ex) {
                    throw new ProvisioningException(
                            "Problems encountered with partitions finding", ex);
                }
                TopicDescription topicDescription = topicDescriptions.get(name);
                partitions = topicDescription.partitions().size();
            }
            return new KafkaProducerDestination(name, partitions);
        }
    }
|
||||
|
||||
@Override
|
||||
public ConsumerDestination provisionConsumerDestination(final String name,
|
||||
final String group,
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
|
||||
if (!properties.isMultiplex()) {
|
||||
return doProvisionConsumerDestination(name, group, properties);
|
||||
}
|
||||
else {
|
||||
String[] destinations = StringUtils.commaDelimitedListToStringArray(name);
|
||||
for (String destination : destinations) {
|
||||
doProvisionConsumerDestination(destination.trim(), group, properties);
|
||||
}
|
||||
return new KafkaConsumerDestination(name);
|
||||
}
|
||||
}
|
||||
|
||||
    /**
     * Provision a single consumer topic and, when configured, its DLQ.
     * @param name the topic name (or pattern when destinationIsPattern).
     * @param group the consumer group; blank/empty means anonymous.
     * @param properties the consumer binding properties.
     * @return the provisioned destination.
     */
    private ConsumerDestination doProvisionConsumerDestination(final String name,
            final String group,
            ExtendedConsumerProperties<KafkaConsumerProperties> properties) {

        // A topic pattern cannot be provisioned up front; DLQ is also
        // disallowed in that mode.
        if (properties.getExtension().isDestinationIsPattern()) {
            Assert.isTrue(!properties.getExtension().isEnableDlq(),
                    "enableDLQ is not allowed when listening to topic patterns");
            if (this.logger.isDebugEnabled()) {
                this.logger.debug("Listening to a topic pattern - " + name
                        + " - no provisioning performed");
            }
            return new KafkaConsumerDestination(name);
        }
        KafkaTopicUtils.validateTopicName(name);
        // No group text -> anonymous subscription; DLQ requires a group.
        boolean anonymous = !StringUtils.hasText(group);
        Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
                "DLQ support is not available for anonymous subscriptions");
        if (properties.getInstanceCount() == 0) {
            throw new IllegalArgumentException("Instance count cannot be zero");
        }
        // Requested partition count is instanceCount * concurrency.
        int partitionCount = properties.getInstanceCount() * properties.getConcurrency();
        ConsumerDestination consumerDestination = new KafkaConsumerDestination(name);
        try (AdminClient adminClient = createAdminClient()) {
            createTopic(adminClient, name, partitionCount,
                    properties.getExtension().isAutoRebalanceEnabled(),
                    properties.getExtension().getTopic());
            if (this.configurationProperties.isAutoCreateTopics()) {
                DescribeTopicsResult describeTopicsResult = adminClient
                        .describeTopics(Collections.singletonList(name));
                KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult
                        .all();
                try {
                    // Bounded wait (operationTimeout seconds) for topic metadata.
                    Map<String, TopicDescription> topicDescriptions = all
                            .get(this.operationTimeout, TimeUnit.SECONDS);
                    TopicDescription topicDescription = topicDescriptions.get(name);
                    int partitions = topicDescription.partitions().size();
                    consumerDestination = createDlqIfNeedBe(adminClient, name, group,
                            properties, anonymous, partitions);
                    if (consumerDestination == null) {
                        // No DLQ created: fall back to a plain destination
                        // carrying the discovered partition count.
                        consumerDestination = new KafkaConsumerDestination(name,
                                partitions);
                    }
                }
                catch (Exception ex) {
                    throw new ProvisioningException("provisioning exception", ex);
                }
            }
        }
        return consumerDestination;
    }
|
||||
|
||||
	/**
	 * Create a new {@link AdminClient} from the binder's admin client properties.
	 * Package-private so tests can inspect the configured client.
	 * @return a new AdminClient; the caller is responsible for closing it.
	 */
	AdminClient createAdminClient() {
		return AdminClient.create(this.adminClientProperties);
	}
|
||||
|
||||
	/**
	 * In general, binder properties supersede boot kafka properties. The one exception is
	 * the bootstrap servers. In that case, we should only override the boot properties if
	 * (there is a binder property AND it is a non-default value) OR (if there is no boot
	 * property); this is needed because the binder property never returns a null value.
	 * <p>NOTE(review): the method name is misspelled ("normalalize"); it is public API,
	 * so it is left as-is to avoid breaking callers. The {@code bootProps} parameter is
	 * not read in this method body — presumably kept for signature compatibility; verify
	 * against callers before removing.
	 * @param adminProps the admin properties to normalize.
	 * @param bootProps the boot kafka properties.
	 * @param binderProps the binder kafka properties.
	 * @throws IllegalStateException if bootstrap servers are set via 'configuration'.
	 */
	public static void normalalizeBootPropsWithBinder(Map<String, Object> adminProps,
			KafkaProperties bootProps, KafkaBinderConfigurationProperties binderProps) {
		// First deal with the outlier
		String kafkaConnectionString = binderProps.getKafkaConnectionString();
		// Override only when boot supplied nothing, or the binder brokers were
		// explicitly changed from their default.
		if (ObjectUtils
				.isEmpty(adminProps.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG))
				|| !kafkaConnectionString
						.equals(binderProps.getDefaultKafkaConnectionString())) {
			adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
					kafkaConnectionString);
		}
		// Now override any boot values with binder values
		Map<String, String> binderProperties = binderProps.getConfiguration();
		Set<String> adminConfigNames = AdminClientConfig.configNames();
		binderProperties.forEach((key, value) -> {
			if (key.equals(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)) {
				throw new IllegalStateException(
						"Set binder bootstrap servers via the 'brokers' property, not 'configuration'");
			}
			// Only copy keys the AdminClient actually recognizes.
			if (adminConfigNames.contains(key)) {
				Object replaced = adminProps.put(key, value);
				if (replaced != null && KafkaTopicProvisioner.logger.isDebugEnabled()) {
					KafkaTopicProvisioner.logger.debug("Overrode boot property: [" + key + "], from: ["
							+ replaced + "] to: [" + value + "]");
				}
			}
		});
	}
|
||||
|
||||
	/**
	 * Provision the dead-letter topic for a consumer destination when DLQ support is
	 * enabled and the subscription is not anonymous.
	 * @param adminClient the admin client to provision with.
	 * @param name the main topic name.
	 * @param group the consumer group (used in the derived DLQ name).
	 * @param properties the consumer properties.
	 * @param anonymous whether the subscription is anonymous (no group).
	 * @param partitions the main topic's actual partition count.
	 * @return a destination carrying the DLQ name, or {@code null} when no DLQ applies.
	 * @throws ProvisioningException if DLQ topic creation fails.
	 */
	private ConsumerDestination createDlqIfNeedBe(AdminClient adminClient, String name,
			String group, ExtendedConsumerProperties<KafkaConsumerProperties> properties,
			boolean anonymous, int partitions) {

		if (properties.getExtension().isEnableDlq() && !anonymous) {
			// An explicit dlqName wins; otherwise derive "error.<topic>.<group>".
			String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName())
					? properties.getExtension().getDlqName()
					: "error." + name + "." + group;
			// Default the DLQ partition count to the main topic's partition count.
			int dlqPartitions = properties.getExtension().getDlqPartitions() == null
					? partitions
					: properties.getExtension().getDlqPartitions();
			try {
				createTopicAndPartitions(adminClient, dlqTopic, dlqPartitions,
						properties.getExtension().isAutoRebalanceEnabled(),
						properties.getExtension().getTopic());
			}
			catch (Throwable throwable) {
				// Errors must not be wrapped; everything else becomes a provisioning failure.
				if (throwable instanceof Error) {
					throw (Error) throwable;
				}
				else {
					throw new ProvisioningException("provisioning exception", throwable);
				}
			}
			return new KafkaConsumerDestination(name, partitions, dlqTopic);
		}
		return null;
	}
|
||||
|
||||
	/**
	 * Create the topic if necessary, translating any failure into a
	 * {@link ProvisioningException} (except {@link Error}s, which are rethrown as-is).
	 * @param adminClient the admin client to provision with.
	 * @param name the topic name.
	 * @param partitionCount the desired partition count.
	 * @param tolerateLowerPartitionsOnBroker whether a lower broker partition count is tolerated.
	 * @param properties the topic properties.
	 */
	private void createTopic(AdminClient adminClient, String name, int partitionCount,
			boolean tolerateLowerPartitionsOnBroker, KafkaTopicProperties properties) {
		try {
			createTopicIfNecessary(adminClient, name, partitionCount,
					tolerateLowerPartitionsOnBroker, properties);
		}
		// TODO: Remove catching Throwable. See this thread:
		// TODO:
		// https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/pull/514#discussion_r241075940
		catch (Throwable throwable) {
			if (throwable instanceof Error) {
				throw (Error) throwable;
			}
			else {
				// TODO:
				// https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/pull/514#discussion_r241075940
				throw new ProvisioningException("Provisioning exception", throwable);
			}
		}
	}
|
||||
|
||||
private void createTopicIfNecessary(AdminClient adminClient, final String topicName,
|
||||
final int partitionCount, boolean tolerateLowerPartitionsOnBroker,
|
||||
KafkaTopicProperties properties) throws Throwable {
|
||||
|
||||
if (this.configurationProperties.isAutoCreateTopics()) {
|
||||
createTopicAndPartitions(adminClient, topicName, partitionCount,
|
||||
tolerateLowerPartitionsOnBroker, properties);
|
||||
}
|
||||
else {
|
||||
this.logger.info("Auto creation of topics is disabled.");
|
||||
}
|
||||
}
|
||||
|
||||
	/**
	 * Creates a Kafka topic if needed, or try to increase its partition count to the
	 * desired number.
	 * @param adminClient kafka admin client
	 * @param topicName topic name
	 * @param partitionCount partition count
	 * @param tolerateLowerPartitionsOnBroker whether lower partitions count on broker is
	 * tolerated or not
	 * @param topicProperties kafka topic properties
	 * @throws Throwable from topic creation
	 */
	private void createTopicAndPartitions(AdminClient adminClient, final String topicName,
			final int partitionCount, boolean tolerateLowerPartitionsOnBroker,
			KafkaTopicProperties topicProperties) throws Throwable {

		ListTopicsResult listTopicsResult = adminClient.listTopics();
		KafkaFuture<Set<String>> namesFutures = listTopicsResult.names();

		Set<String> names = namesFutures.get(this.operationTimeout, TimeUnit.SECONDS);
		if (names.contains(topicName)) {
			// Topic already exists: possibly grow it to the effective partition count.
			// only consider minPartitionCount for resizing if autoAddPartitions is true
			int effectivePartitionCount = this.configurationProperties
					.isAutoAddPartitions()
							? Math.max(
									this.configurationProperties.getMinPartitionCount(),
									partitionCount)
							: partitionCount;
			DescribeTopicsResult describeTopicsResult = adminClient
					.describeTopics(Collections.singletonList(topicName));
			KafkaFuture<Map<String, TopicDescription>> topicDescriptionsFuture = describeTopicsResult
					.all();
			Map<String, TopicDescription> topicDescriptions = topicDescriptionsFuture
					.get(this.operationTimeout, TimeUnit.SECONDS);
			TopicDescription topicDescription = topicDescriptions.get(topicName);
			int partitionSize = topicDescription.partitions().size();
			if (partitionSize < effectivePartitionCount) {
				if (this.configurationProperties.isAutoAddPartitions()) {
					// Grow the existing topic in place (partitions can only increase).
					CreatePartitionsResult partitions = adminClient
							.createPartitions(Collections.singletonMap(topicName,
									NewPartitions.increaseTo(effectivePartitionCount)));
					partitions.all().get(this.operationTimeout, TimeUnit.SECONDS);
				}
				else if (tolerateLowerPartitionsOnBroker) {
					this.logger.warn("The number of expected partitions was: "
							+ partitionCount + ", but " + partitionSize
							+ (partitionSize > 1 ? " have " : " has ")
							+ "been found instead." + "There will be "
							+ (effectivePartitionCount - partitionSize)
							+ " idle consumers");
				}
				else {
					throw new ProvisioningException(
							"The number of expected partitions was: " + partitionCount
									+ ", but " + partitionSize
									+ (partitionSize > 1 ? " have " : " has ")
									+ "been found instead."
									+ "Consider either increasing the partition count of the topic or enabling "
									+ "`autoAddPartitions`");
				}
			}
		}
		else {
			// Topic does not exist yet: create it, retrying via metadataRetryOperations.
			// always consider minPartitionCount for topic creation
			final int effectivePartitionCount = Math.max(
					this.configurationProperties.getMinPartitionCount(), partitionCount);
			this.metadataRetryOperations.execute((context) -> {

				NewTopic newTopic;
				// Explicit replica assignments take precedence over counts/factors.
				Map<Integer, List<Integer>> replicasAssignments = topicProperties
						.getReplicasAssignments();
				if (replicasAssignments != null && replicasAssignments.size() > 0) {
					newTopic = new NewTopic(topicName,
							topicProperties.getReplicasAssignments());
				}
				else {
					// Topic-level replication factor falls back to the binder-level one.
					newTopic = new NewTopic(topicName, effectivePartitionCount,
							topicProperties.getReplicationFactor() != null
									? topicProperties.getReplicationFactor()
									: this.configurationProperties
											.getReplicationFactor());
				}
				if (topicProperties.getProperties().size() > 0) {
					newTopic.configs(topicProperties.getProperties());
				}
				CreateTopicsResult createTopicsResult = adminClient
						.createTopics(Collections.singletonList(newTopic));
				try {
					createTopicsResult.all().get(this.operationTimeout, TimeUnit.SECONDS);
				}
				catch (Exception ex) {
					if (ex instanceof ExecutionException) {
						// A concurrent creation (e.g. another instance) is benign.
						if (ex.getCause() instanceof TopicExistsException) {
							if (this.logger.isWarnEnabled()) {
								this.logger.warn("Attempt to create topic: " + topicName
										+ ". Topic already exists.")
;
							}
						}
						else {
							this.logger.error("Failed to create topics", ex.getCause());
							throw ex.getCause();
						}
					}
					else {
						this.logger.error("Failed to create topics", ex.getCause());
						throw ex.getCause();
					}
				}
				return null;
			});
		}
	}
|
||||
|
||||
	/**
	 * Retrieve the partitions for a topic (with retries), verifying the result against
	 * the expected partition count.
	 * <p>If the supplied callable yields no partitions, a follow-up
	 * {@code describeTopics} call is made to distinguish "topic missing" (rethrown as
	 * {@link UnknownTopicOrPartitionException}) from other transient failures.
	 * @param partitionCount the expected number of partitions.
	 * @param tolerateLowerPartitionsOnBroker whether fewer broker partitions are tolerated.
	 * @param callable the operation that fetches the partitions.
	 * @param topicName the topic name.
	 * @return the partitions (possibly empty).
	 * @throws BinderException wrapping any failure after retries are exhausted.
	 */
	public Collection<PartitionInfo> getPartitionsForTopic(final int partitionCount,
			final boolean tolerateLowerPartitionsOnBroker,
			final Callable<Collection<PartitionInfo>> callable, final String topicName) {
		try {
			return this.metadataRetryOperations.execute((context) -> {
				Collection<PartitionInfo> partitions = Collections.emptyList();

				try {
					// This call may return null or throw an exception.
					partitions = callable.call();
				}
				catch (Exception ex) {
					// The above call can potentially throw exceptions such as timeout. If
					// we can determine
					// that the exception was due to an unknown topic on the broker, just
					// simply rethrow that.
					if (ex instanceof UnknownTopicOrPartitionException) {
						throw ex;
					}
					this.logger.error("Failed to obtain partition information", ex);
				}
				// In some cases, the above partition query may not throw an UnknownTopic..Exception for various reasons.
				// For that, we are forcing another query to ensure that the topic is present on the server.
				if (CollectionUtils.isEmpty(partitions)) {
					try (AdminClient adminClient = AdminClient
							.create(this.adminClientProperties)) {
						final DescribeTopicsResult describeTopicsResult = adminClient
								.describeTopics(Collections.singletonList(topicName));

						// NOTE(review): this get() has no timeout, unlike the other
						// admin calls in this class — confirm whether that is intended.
						describeTopicsResult.all().get();
					}
					catch (ExecutionException ex) {
						if (ex.getCause() instanceof UnknownTopicOrPartitionException) {
							throw (UnknownTopicOrPartitionException) ex.getCause();
						}
						else {
							logger.warn("No partitions have been retrieved for the topic "
									+ "(" + topicName
									+ "). This will affect the health check.");
						}
					}
				}
				// do a sanity check on the partition set
				int partitionSize = CollectionUtils.isEmpty(partitions) ? 0 : partitions.size();
				if (partitionSize < partitionCount) {
					if (tolerateLowerPartitionsOnBroker) {
						this.logger.warn("The number of expected partitions was: "
								+ partitionCount + ", but " + partitionSize
								+ (partitionSize > 1 ? " have " : " has ")
								+ "been found instead." + "There will be "
								+ (partitionCount - partitionSize) + " idle consumers");
					}
					else {
						throw new IllegalStateException(
								"The number of expected partitions was: " + partitionCount
										+ ", but " + partitionSize
										+ (partitionSize > 1 ? " have " : " has ")
										+ "been found instead");
					}
				}
				return partitions;
			});
		}
		catch (Exception ex) {
			this.logger.error("Cannot initialize Binder", ex);
			throw new BinderException("Cannot initialize binder:", ex);
		}
	}
|
||||
|
||||
private static final class KafkaProducerDestination implements ProducerDestination {
|
||||
|
||||
private final String producerDestinationName;
|
||||
|
||||
private final int partitions;
|
||||
|
||||
KafkaProducerDestination(String destinationName, Integer partitions) {
|
||||
this.producerDestinationName = destinationName;
|
||||
this.partitions = partitions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return this.producerDestinationName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getNameForPartition(int partition) {
|
||||
return this.producerDestinationName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "KafkaProducerDestination{" + "producerDestinationName='"
|
||||
+ producerDestinationName + '\'' + ", partitions=" + partitions + '}';
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private static final class KafkaConsumerDestination implements ConsumerDestination {
|
||||
|
||||
private final String consumerDestinationName;
|
||||
|
||||
private final int partitions;
|
||||
|
||||
private final String dlqName;
|
||||
|
||||
KafkaConsumerDestination(String consumerDestinationName) {
|
||||
this(consumerDestinationName, 0, null);
|
||||
}
|
||||
|
||||
KafkaConsumerDestination(String consumerDestinationName, int partitions) {
|
||||
this(consumerDestinationName, partitions, null);
|
||||
}
|
||||
|
||||
KafkaConsumerDestination(String consumerDestinationName, Integer partitions,
|
||||
String dlqName) {
|
||||
this.consumerDestinationName = consumerDestinationName;
|
||||
this.partitions = partitions;
|
||||
this.dlqName = dlqName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return this.consumerDestinationName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "KafkaConsumerDestination{" + "consumerDestinationName='"
|
||||
+ consumerDestinationName + '\'' + ", partitions=" + partitions
|
||||
+ ", dlqName='" + dlqName + '\'' + '}';
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,76 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.utils;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
|
||||
import org.springframework.lang.Nullable;
|
||||
|
||||
/**
|
||||
* A TriFunction that takes a consumer group, consumer record, and throwable and returns
|
||||
* which partition to publish to the dead letter topic. Returning {@code null} means Kafka
|
||||
* will choose the partition.
|
||||
*
|
||||
* @author Gary Russell
|
||||
* @since 3.0
|
||||
*
|
||||
*/
|
||||
@FunctionalInterface
public interface DlqPartitionFunction {

	/**
	 * Returns the same partition as the original record.
	 */
	DlqPartitionFunction ORIGINAL_PARTITION = (group, rec, ex) -> rec.partition();

	/**
	 * Always returns partition 0 (for single-partition DLQs).
	 */
	DlqPartitionFunction PARTITION_ZERO = (group, rec, ex) -> 0;

	/**
	 * Apply the function.
	 * @param group the consumer group.
	 * @param record the consumer record.
	 * @param throwable the exception.
	 * @return the DLQ partition, or null (Kafka chooses the partition).
	 */
	@Nullable
	Integer apply(String group, ConsumerRecord<?, ?> record, Throwable throwable);

	/**
	 * Determine the fallback function to use based on the dlq partition count if no
	 * {@link DlqPartitionFunction} bean is provided. With no configured count, or a
	 * count above 1 (which is logged as an error, since a custom bean should be
	 * supplied), the original record's partition is used; a count of exactly 1 maps
	 * everything to partition 0.
	 * @param dlqPartitions the partition count.
	 * @param logger the logger.
	 * @return the fallback.
	 */
	static DlqPartitionFunction determineFallbackFunction(@Nullable Integer dlqPartitions, Log logger) {
		if (dlqPartitions == null) {
			return ORIGINAL_PARTITION;
		}
		else if (dlqPartitions > 1) {
			logger.error("'dlqPartitions' is > 1 but a custom DlqPartitionFunction bean is not provided");
			return ORIGINAL_PARTITION;
		}
		else {
			return PARTITION_ZERO;
		}
	}

}
|
||||
@@ -1,54 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.utils;
|
||||
|
||||
import java.io.UnsupportedEncodingException;
|
||||
|
||||
/**
 * Utility methods related to Kafka topics.
 *
 * @author Soby Chacko
 */
public final class KafkaTopicUtils {

	private KafkaTopicUtils() {

	}

	/**
	 * Validate topic name. Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
	 * @param topicName name of the topic
	 * @throws IllegalArgumentException if the name contains any other character
	 */
	public static void validateTopicName(String topicName) {
		// Use the Charset overload instead of getBytes("UTF-8"): UTF-8 is guaranteed
		// present, so the impossible UnsupportedEncodingException catch goes away.
		// Any non-ASCII character encodes to bytes outside the allowed ranges below
		// (negative as signed bytes), so it is still rejected, as before.
		byte[] utf8 = topicName.getBytes(java.nio.charset.StandardCharsets.UTF_8);
		for (byte b : utf8) {
			if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z')
					|| (b >= '0') && (b <= '9') || (b == '.') || (b == '-')
					|| (b == '_'))) {
				throw new IllegalArgumentException(
						"Topic name can only have ASCII alphanumerics, '.', '_' and '-', but was: '"
								+ topicName + "'");
			}
		}
	}

}
|
||||
@@ -1,108 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
public class KafkaBinderConfigurationPropertiesTest {
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersGroupIdFromKafkaProperties() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
kafkaProperties.getConsumer().setGroupId("group1");
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration =
|
||||
kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaProperties() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
kafkaProperties.getConsumer().setEnableAutoCommit(true);
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration =
|
||||
kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersGroupIdFromKafkaBinderConfigurationPropertiesConfiguration() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
kafkaBinderConfigurationProperties
|
||||
.setConfiguration(Collections.singletonMap(ConsumerConfig.GROUP_ID_CONFIG, "group1"));
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaBinderConfigurationPropertiesConfiguration() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
kafkaBinderConfigurationProperties
|
||||
.setConfiguration(Collections.singletonMap(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"));
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersGroupIdFromKafkaBinderConfigurationPropertiesConsumerProperties() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
kafkaBinderConfigurationProperties
|
||||
.setConsumerProperties(Collections.singletonMap(ConsumerConfig.GROUP_ID_CONFIG, "group1"));
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaBinderConfigurationPropertiesConsumerProps() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
kafkaBinderConfigurationProperties
|
||||
.setConsumerProperties(Collections.singletonMap(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"));
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
}
|
||||
@@ -1,118 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.provisioning;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
import org.apache.kafka.clients.admin.AdminClient;
|
||||
import org.apache.kafka.clients.admin.AdminClientConfig;
|
||||
import org.apache.kafka.common.config.SslConfigs;
|
||||
import org.apache.kafka.common.network.SslChannelBuilder;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.core.io.ClassPathResource;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.assertj.core.api.Assertions.fail;
|
||||
|
||||
/**
|
||||
* @author Gary Russell
|
||||
* @since 2.0
|
||||
*
|
||||
*/
|
||||
public class KafkaTopicProvisionerTests {

	// Binder 'configuration' overrides boot, but the boot bootstrap servers win when
	// the binder brokers were left at their default ("localhost:9092").
	// NOTE(review): assertions reach into AdminClient internals via reflection
	// ("client.selector.channelBuilder") and are therefore Kafka-version sensitive.
	@SuppressWarnings("rawtypes")
	@Test
	public void bootPropertiesOverriddenExceptServers() throws Exception {
		KafkaProperties bootConfig = new KafkaProperties();
		bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
				"PLAINTEXT");
		bootConfig.setBootstrapServers(Collections.singletonList("localhost:1234"));
		KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(
				bootConfig);
		binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG,
				"SSL");
		ClassPathResource ts = new ClassPathResource("test.truststore.ks");
		binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
				ts.getFile().getAbsolutePath());
		binderConfig.setBrokers("localhost:9092");
		KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig,
				bootConfig);
		AdminClient adminClient = provisioner.createAdminClient();
		// SSL from the binder configuration took effect...
		assertThat(KafkaTestUtils.getPropertyValue(adminClient,
				"client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
		Map configs = KafkaTestUtils.getPropertyValue(adminClient,
				"client.selector.channelBuilder.configs", Map.class);
		// ...but the boot bootstrap servers were preserved.
		assertThat(
				((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0))
						.isEqualTo("localhost:1234");
		adminClient.close();
	}

	// Binder brokers explicitly set to a non-default value override the boot
	// bootstrap servers as well.
	@SuppressWarnings("rawtypes")
	@Test
	public void bootPropertiesOverriddenIncludingServers() throws Exception {
		KafkaProperties bootConfig = new KafkaProperties();
		bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
				"PLAINTEXT");
		bootConfig.setBootstrapServers(Collections.singletonList("localhost:9092"));
		KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(
				bootConfig);
		binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG,
				"SSL");
		ClassPathResource ts = new ClassPathResource("test.truststore.ks");
		binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
				ts.getFile().getAbsolutePath());
		binderConfig.setBrokers("localhost:1234");
		KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig,
				bootConfig);
		AdminClient adminClient = provisioner.createAdminClient();
		assertThat(KafkaTestUtils.getPropertyValue(adminClient,
				"client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
		Map configs = KafkaTestUtils.getPropertyValue(adminClient,
				"client.selector.channelBuilder.configs", Map.class);
		// The binder brokers won this time.
		assertThat(
				((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0))
						.isEqualTo("localhost:1234");
		adminClient.close();
	}

	// Setting bootstrap servers through 'configuration' is rejected at construction.
	@Test
	public void brokersInvalid() throws Exception {
		KafkaProperties bootConfig = new KafkaProperties();
		KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(
				bootConfig);
		binderConfig.getConfiguration().put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
				"localhost:1234");
		try {
			new KafkaTopicProvisioner(binderConfig, bootConfig);
			fail("Expected illegal state");
		}
		catch (IllegalStateException e) {
			assertThat(e.getMessage()).isEqualTo(
					"Set binder bootstrap servers via the 'brokers' property, not 'configuration'");
		}
	}

}
|
||||
Binary file not shown.
@@ -1,124 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>

	<artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
	<packaging>jar</packaging>
	<name>spring-cloud-stream-binder-kafka-streams</name>
	<description>Kafka Streams Binder Implementation</description>

	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>3.0.0.RC2</version>
	</parent>

	<properties>
		<!-- Avro is used both as a provided test dependency and by the
		     avro-maven-plugin below; keep the two versions in sync. -->
		<avro.version>1.8.2</avro.version>
	</properties>

	<dependencies>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-starter-actuator</artifactId>
			<optional>true</optional>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-configuration-processor</artifactId>
			<optional>true</optional>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-autoconfigure</artifactId>
			<optional>true</optional>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka-streams</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.kafka</groupId>
			<artifactId>spring-kafka</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-test</artifactId>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.springframework.kafka</groupId>
			<artifactId>spring-kafka-test</artifactId>
		</dependency>
		<!-- Added back since Kafka still depends on it, but it has been removed by Boot due to EOL -->
		<dependency>
			<groupId>log4j</groupId>
			<artifactId>log4j</artifactId>
			<version>1.2.17</version>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream-binder-test</artifactId>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-autoconfigure-processor</artifactId>
			<optional>true</optional>
		</dependency>
		<!-- Following dependencies are needed to support Kafka 1.1.0 client-->
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka_2.12</artifactId>
			<version>${kafka.version}</version>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka_2.12</artifactId>
			<version>${kafka.version}</version>
			<classifier>test</classifier>
			<scope>test</scope>
		</dependency>
		<!-- Following dependencies are only provided for testing and won't be packaged with the binder apps-->
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-schema-registry-client</artifactId>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.apache.avro</groupId>
			<artifactId>avro</artifactId>
			<version>${avro.version}</version>
			<scope>provided</scope>
		</dependency>
	</dependencies>

	<build>
		<plugins>
			<!-- Generates Java classes from the Avro schemas under
			     src/test/resources/avro for use by the test sources. -->
			<plugin>
				<groupId>org.apache.avro</groupId>
				<artifactId>avro-maven-plugin</artifactId>
				<version>${avro.version}</version>
				<executions>
					<execution>
						<phase>generate-test-sources</phase>
						<goals>
							<goal>schema</goal>
						</goals>
						<configuration>
							<outputDirectory>${project.basedir}/target/generated-test-sources</outputDirectory>
							<testOutputDirectory>${project.basedir}/target/generated-test-sources</testOutputDirectory>
							<testSourceDirectory>${project.basedir}/src/test/resources/avro</testSourceDirectory>
						</configuration>
					</execution>
				</executions>
			</plugin>
		</plugins>
	</build>
</project>
|
||||
@@ -1,376 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import org.apache.kafka.streams.StreamsBuilder;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.Topology;
|
||||
import org.apache.kafka.streams.kstream.Consumed;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.processor.TimestampExtractor;
|
||||
import org.apache.kafka.streams.state.KeyValueStore;
|
||||
import org.apache.kafka.streams.state.StoreBuilder;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.config.BeanDefinition;
|
||||
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.ApplicationContextAware;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.kafka.config.KafkaStreamsConfiguration;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBeanCustomizer;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
 * Base class for the Kafka Streams binder processors. Provides the shared
 * plumbing used by both StreamListener and function based processors: building
 * a {@link StreamsBuilderFactoryBean} per processor, resolving value Serdes,
 * and creating the bound {@code KStream}/{@code KTable}/{@code GlobalKTable}
 * input targets.
 *
 * @author Soby Chacko
 * @since 3.0.0
 */
public abstract class AbstractKafkaStreamsBinderProcessor implements ApplicationContextAware {

	private static final Log LOG = LogFactory.getLog(AbstractKafkaStreamsBinderProcessor.class);

	// Catalogue that tracks each binding's StreamsBuilderFactoryBean.
	private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
	private final BindingServiceProperties bindingServiceProperties;
	private final KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties;
	private final CleanupConfig cleanupConfig;
	private final KeyValueSerdeResolver keyValueSerdeResolver;

	// Populated by setApplicationContext; assumed to be set before any of the
	// protected build methods are invoked.
	protected ConfigurableApplicationContext applicationContext;

	public AbstractKafkaStreamsBinderProcessor(BindingServiceProperties bindingServiceProperties,
			KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
			KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
			KeyValueSerdeResolver keyValueSerdeResolver, CleanupConfig cleanupConfig) {
		this.bindingServiceProperties = bindingServiceProperties;
		this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
		this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
		this.keyValueSerdeResolver = keyValueSerdeResolver;
		this.cleanupConfig = cleanupConfig;
	}

	@Override
	public final void setApplicationContext(ApplicationContext applicationContext)
			throws BeansException {
		this.applicationContext = (ConfigurableApplicationContext) applicationContext;
	}

	/**
	 * Translate the binding's {@code startOffset} consumer property into a Kafka
	 * Streams {@link Topology.AutoOffsetReset}, or {@code null} when unset.
	 * Also warns when {@code resetOffsets} is configured, since that property
	 * has no effect in the Kafka Streams binder.
	 */
	protected Topology.AutoOffsetReset getAutoOffsetReset(String inboundName, KafkaStreamsConsumerProperties extendedConsumerProperties) {
		final KafkaConsumerProperties.StartOffset startOffset = extendedConsumerProperties
				.getStartOffset();
		Topology.AutoOffsetReset autoOffsetReset = null;
		if (startOffset != null) {
			switch (startOffset) {
				case earliest:
					autoOffsetReset = Topology.AutoOffsetReset.EARLIEST;
					break;
				case latest:
					autoOffsetReset = Topology.AutoOffsetReset.LATEST;
					break;
				default:
					break;
			}
		}
		if (extendedConsumerProperties.isResetOffsets()) {
			AbstractKafkaStreamsBinderProcessor.LOG.warn("Detected resetOffsets configured on binding "
					+ inboundName + ". "
					+ "Setting resetOffsets in Kafka Streams binder does not have any effect.");
		}
		return autoOffsetReset;
	}

	/**
	 * When the target parameter is a {@code KTable} or {@code GlobalKTable},
	 * build the corresponding table from the binding destination, wrap the
	 * proxy created during initial binding with the real table, register the
	 * factory bean in the catalogue, and store the table into
	 * {@code arguments[index]} for the eventual method invocation.
	 */
	@SuppressWarnings("unchecked")
	protected void handleKTableGlobalKTableInputs(Object[] arguments, int index, String input, Class<?> parameterType, Object targetBean,
			StreamsBuilderFactoryBean streamsBuilderFactoryBean, StreamsBuilder streamsBuilder,
			KafkaStreamsConsumerProperties extendedConsumerProperties,
			Serde<?> keySerde, Serde<?> valueSerde, Topology.AutoOffsetReset autoOffsetReset) {
		if (parameterType.isAssignableFrom(KTable.class)) {
			String materializedAs = extendedConsumerProperties.getMaterializedAs();
			String bindingDestination = this.bindingServiceProperties.getBindingDestination(input);
			KTable<?, ?> table = getKTable(extendedConsumerProperties, streamsBuilder, keySerde, valueSerde, materializedAs,
					bindingDestination, autoOffsetReset);
			KTableBoundElementFactory.KTableWrapper kTableWrapper =
					(KTableBoundElementFactory.KTableWrapper) targetBean;
			//wrap the proxy created during the initial target type binding with real object (KTable)
			kTableWrapper.wrap((KTable<Object, Object>) table);
			this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactory(streamsBuilderFactoryBean);
			arguments[index] = table;
		}
		else if (parameterType.isAssignableFrom(GlobalKTable.class)) {
			String materializedAs = extendedConsumerProperties.getMaterializedAs();
			String bindingDestination = this.bindingServiceProperties.getBindingDestination(input);
			GlobalKTable<?, ?> table = getGlobalKTable(extendedConsumerProperties, streamsBuilder, keySerde, valueSerde, materializedAs,
					bindingDestination, autoOffsetReset);
			GlobalKTableBoundElementFactory.GlobalKTableWrapper globalKTableWrapper =
					(GlobalKTableBoundElementFactory.GlobalKTableWrapper) targetBean;
			//wrap the proxy created during the initial target type binding with real object (KTable)
			globalKTableWrapper.wrap((GlobalKTable<Object, Object>) table);
			this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactory(streamsBuilderFactoryBean);
			arguments[index] = table;
		}
	}

	/**
	 * Assemble the effective Kafka Streams configuration for one processor
	 * (global properties, function-specific properties, application id,
	 * concurrency), create and register a {@link StreamsBuilderFactoryBean}
	 * named {@code "stream-builder-" + beanNamePostPrefix}, and return it.
	 * <p>
	 * NOTE: this mutates the shared {@code streamConfigGlobalProperties} map in
	 * place and removes the application id again at the end so that the next
	 * processor does not accidentally re-use it.
	 */
	@SuppressWarnings({"unchecked"})
	protected StreamsBuilderFactoryBean buildStreamsBuilderAndRetrieveConfig(String beanNamePostPrefix,
			ApplicationContext applicationContext, String inboundName,
			KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
			StreamsBuilderFactoryBeanCustomizer customizer) {
		ConfigurableListableBeanFactory beanFactory = this.applicationContext
				.getBeanFactory();

		Map<String, Object> streamConfigGlobalProperties = applicationContext
				.getBean("streamConfigGlobalProperties", Map.class);

		if (kafkaStreamsBinderConfigurationProperties != null) {
			final Map<String, KafkaStreamsBinderConfigurationProperties.Functions> functionConfigMap = kafkaStreamsBinderConfigurationProperties.getFunctions();
			if (!CollectionUtils.isEmpty(functionConfigMap)) {
				// NOTE(review): functionConfigMap.get(beanNamePostPrefix) may be null when
				// function-level properties exist for other functions but not this one —
				// potential NPE on the next line; confirm against callers.
				final KafkaStreamsBinderConfigurationProperties.Functions functionConfig = functionConfigMap.get(beanNamePostPrefix);
				final Map<String, String> functionSpecificConfig = functionConfig.getConfiguration();
				if (!CollectionUtils.isEmpty(functionSpecificConfig)) {
					streamConfigGlobalProperties.putAll(functionSpecificConfig);
				}

				String applicationId = functionConfig.getApplicationId();
				if (!StringUtils.isEmpty(applicationId)) {
					streamConfigGlobalProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
				}
			}
		}

		//this is only used primarily for StreamListener based processors. Although in theory, functions can use it,
		//it is ideal for functions to use the approach used in the above if statement by using a property like
		//spring.cloud.stream.kafka.streams.binder.functions.process.configuration.num.threads (assuming that process is the function name).
		KafkaStreamsConsumerProperties extendedConsumerProperties = this.kafkaStreamsExtendedBindingProperties
				.getExtendedConsumerProperties(inboundName);
		streamConfigGlobalProperties
				.putAll(extendedConsumerProperties.getConfiguration());

		String bindingLevelApplicationId = extendedConsumerProperties.getApplicationId();
		// override application.id if set at the individual binding level.
		// We provide this for backward compatibility with StreamListener based processors.
		// For function based processors see the approach used above
		// (i.e. use a property like spring.cloud.stream.kafka.streams.binder.functions.process.applicationId).
		if (StringUtils.hasText(bindingLevelApplicationId)) {
			streamConfigGlobalProperties.put(StreamsConfig.APPLICATION_ID_CONFIG,
					bindingLevelApplicationId);
		}

		//If the application id is not set by any mechanism, then generate it.
		streamConfigGlobalProperties.computeIfAbsent(StreamsConfig.APPLICATION_ID_CONFIG,
				k -> {
					String generatedApplicationID = beanNamePostPrefix + "-applicationId";
					LOG.info("Binder Generated Kafka Streams Application ID: " + generatedApplicationID);
					LOG.info("Use the binder generated application ID only for development and testing. ");
					LOG.info("For production deployments, please consider explicitly setting an application ID using a configuration property.");
					LOG.info("The generated applicationID is static and will be preserved over application restarts.");
					return generatedApplicationID;
				});

		int concurrency = this.bindingServiceProperties.getConsumerProperties(inboundName)
				.getConcurrency();
		// override concurrency if set at the individual binding level.
		if (concurrency > 1) {
			streamConfigGlobalProperties.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG,
					concurrency);
		}

		KafkaStreamsConfiguration kafkaStreamsConfiguration = new KafkaStreamsConfiguration(streamConfigGlobalProperties);

		StreamsBuilderFactoryBean streamsBuilder = this.cleanupConfig == null
				? new StreamsBuilderFactoryBean(kafkaStreamsConfiguration)
				: new StreamsBuilderFactoryBean(kafkaStreamsConfiguration,
						this.cleanupConfig);
		if (customizer != null) {
			customizer.configure(streamsBuilder);
		}
		// The binder starts the factory bean itself once bindings are in place.
		streamsBuilder.setAutoStartup(false);
		BeanDefinition streamsBuilderBeanDefinition = BeanDefinitionBuilder
				.genericBeanDefinition(
						(Class<StreamsBuilderFactoryBean>) streamsBuilder.getClass(),
						() -> streamsBuilder)
				.getRawBeanDefinition();
		((BeanDefinitionRegistry) beanFactory).registerBeanDefinition(
				"stream-builder-" + beanNamePostPrefix, streamsBuilderBeanDefinition);

		extendedConsumerProperties.setApplicationId((String) streamConfigGlobalProperties.get(StreamsConfig.APPLICATION_ID_CONFIG));
		//Removing the application ID from global properties so that the next function won't re-use it and cause race conditions.
		streamConfigGlobalProperties.remove(StreamsConfig.APPLICATION_ID_CONFIG);

		// The '&' prefix retrieves the FactoryBean itself rather than the object it creates.
		return applicationContext.getBean(
				"&stream-builder-" + beanNamePostPrefix, StreamsBuilderFactoryBean.class);
	}

	/**
	 * Resolve the inbound value {@link Serde} for a binding: the configured
	 * Serde when native decoding is enabled, otherwise a pass-through
	 * {@code ByteArray} Serde (conversion is then done by Spring Cloud Stream).
	 */
	protected Serde<?> getValueSerde(String inboundName, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, ResolvableType resolvableType) {
		if (bindingServiceProperties.getConsumerProperties(inboundName).isUseNativeDecoding()) {
			BindingProperties bindingProperties = this.bindingServiceProperties
					.getBindingProperties(inboundName);
			return this.keyValueSerdeResolver.getInboundValueSerde(
					bindingProperties.getConsumer(), kafkaStreamsConsumerProperties, resolvableType);
		}
		else {
			return Serdes.ByteArray();
		}
	}

	/**
	 * Create the inbound {@link KStream} for a binding. The binding destination
	 * may be a comma-delimited list of topics. On the first build for this
	 * topology, any {@link StoreBuilder} beans in the context are added as
	 * state stores.
	 */
	protected KStream<?, ?> getKStream(String inboundName, BindingProperties bindingProperties, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
			StreamsBuilder streamsBuilder, Serde<?> keySerde, Serde<?> valueSerde, Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
		if (firstBuild) {
			addStateStoreBeans(streamsBuilder);
		}

		String[] bindingTargets = StringUtils.commaDelimitedListToStringArray(
				this.bindingServiceProperties.getBindingDestination(inboundName));
		final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
		KStream<?, ?> stream = streamsBuilder.stream(Arrays.asList(bindingTargets),
				consumed);
		final boolean nativeDecoding = this.bindingServiceProperties
				.getConsumerProperties(inboundName).isUseNativeDecoding();
		if (nativeDecoding) {
			LOG.info("Native decoding is enabled for " + inboundName
					+ ". Inbound deserialization done at the broker.");
		}
		else {
			LOG.info("Native decoding is disabled for " + inboundName
					+ ". Inbound message conversion done by Spring Cloud Stream.");
		}

		return getkStream(bindingProperties, stream, nativeDecoding);
	}

	// When native decoding is off, wrap each non-null value in a Message
	// carrying the binding's content type so downstream conversion can occur.
	private KStream<?, ?> getkStream(BindingProperties bindingProperties, KStream<?, ?> stream, boolean nativeDecoding) {
		if (!nativeDecoding) {
			stream = stream.mapValues((value) -> {
				Object returnValue;
				String contentType = bindingProperties.getContentType();
				if (value != null && !StringUtils.isEmpty(contentType)) {
					returnValue = MessageBuilder.withPayload(value)
							.setHeader(MessageHeaders.CONTENT_TYPE, contentType).build();
				}
				else {
					returnValue = value;
				}
				return returnValue;
			});
		}
		return stream;
	}

	// Register every StoreBuilder bean in the context as a state store on the
	// topology. Best-effort: failures are deliberately ignored.
	@SuppressWarnings("rawtypes")
	private void addStateStoreBeans(StreamsBuilder streamsBuilder) {
		try {
			final Map<String, StoreBuilder> storeBuilders = applicationContext.getBeansOfType(StoreBuilder.class);
			if (!CollectionUtils.isEmpty(storeBuilders)) {
				storeBuilders.values().forEach(storeBuilder -> {
					streamsBuilder.addStateStore(storeBuilder);
					if (LOG.isInfoEnabled()) {
						LOG.info("state store " + storeBuilder.name() + " added to topology");
					}
				});
			}
		}
		catch (Exception e) {
			// Pass through. State store registration is best-effort; a context
			// lookup failure must not abort topology construction.
		}
	}

	// Build a KTable materialized into a named key-value store.
	private <K, V> KTable<K, V> materializedAs(StreamsBuilder streamsBuilder, String destination, String storeName,
			Serde<K> k, Serde<V> v, Topology.AutoOffsetReset autoOffsetReset, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties) {

		final Consumed<K, V> consumed = getConsumed(kafkaStreamsConsumerProperties, k, v, autoOffsetReset);
		return streamsBuilder.table(this.bindingServiceProperties.getBindingDestination(destination),
				consumed, getMaterialized(storeName, k, v));
	}

	// Materialized spec for a named store with the given key/value Serdes.
	private <K, V> Materialized<K, V, KeyValueStore<Bytes, byte[]>> getMaterialized(
			String storeName, Serde<K> k, Serde<V> v) {
		return Materialized.<K, V, KeyValueStore<Bytes, byte[]>>as(storeName)
				.withKeySerde(k).withValueSerde(v);
	}

	// Build a GlobalKTable materialized into a named key-value store.
	private <K, V> GlobalKTable<K, V> materializedAsGlobalKTable(
			StreamsBuilder streamsBuilder, String destination, String storeName,
			Serde<K> k, Serde<V> v, Topology.AutoOffsetReset autoOffsetReset, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties) {
		final Consumed<K, V> consumed = getConsumed(kafkaStreamsConsumerProperties, k, v, autoOffsetReset);
		return streamsBuilder.globalTable(
				this.bindingServiceProperties.getBindingDestination(destination),
				consumed,
				getMaterialized(storeName, k, v));
	}

	// GlobalKTable for the binding destination, materialized only when
	// 'materializedAs' is configured.
	private GlobalKTable<?, ?> getGlobalKTable(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
			StreamsBuilder streamsBuilder,
			Serde<?> keySerde, Serde<?> valueSerde, String materializedAs,
			String bindingDestination, Topology.AutoOffsetReset autoOffsetReset) {
		final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
		return materializedAs != null
				? materializedAsGlobalKTable(streamsBuilder, bindingDestination,
						materializedAs, keySerde, valueSerde, autoOffsetReset, kafkaStreamsConsumerProperties)
				: streamsBuilder.globalTable(bindingDestination,
						consumed);
	}

	// KTable for the binding destination, materialized only when
	// 'materializedAs' is configured.
	private KTable<?, ?> getKTable(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
			StreamsBuilder streamsBuilder, Serde<?> keySerde,
			Serde<?> valueSerde, String materializedAs, String bindingDestination,
			Topology.AutoOffsetReset autoOffsetReset) {
		final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
		return materializedAs != null
				? materializedAs(streamsBuilder, bindingDestination, materializedAs,
						keySerde, valueSerde, autoOffsetReset, kafkaStreamsConsumerProperties)
				: streamsBuilder.table(bindingDestination,
						consumed);
	}

	// Build the Consumed spec, attaching an optional TimestampExtractor bean.
	// NOTE(review): Consumed's instance with* methods appear to mutate the
	// receiver, so the discarded return value below still takes effect —
	// confirm against the Kafka Streams Consumed API.
	private <K, V> Consumed<K, V> getConsumed(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
			Serde<K> keySerde, Serde<V> valueSerde, Topology.AutoOffsetReset autoOffsetReset) {
		TimestampExtractor timestampExtractor = null;
		if (!StringUtils.isEmpty(kafkaStreamsConsumerProperties.getTimestampExtractorBeanName())) {
			timestampExtractor = applicationContext.getBean(kafkaStreamsConsumerProperties.getTimestampExtractorBeanName(),
					TimestampExtractor.class);
		}
		final Consumed<K, V> consumed = Consumed.with(keySerde, valueSerde)
				.withOffsetResetPolicy(autoOffsetReset);
		if (timestampExtractor != null) {
			consumed.withTimestampExtractor(timestampExtractor);
		}
		return consumed;
	}
}
|
||||
@@ -1,72 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationPropertiesBindHandlerAdvisor;
|
||||
import org.springframework.boot.context.properties.bind.AbstractBindHandler;
|
||||
import org.springframework.boot.context.properties.bind.BindContext;
|
||||
import org.springframework.boot.context.properties.bind.BindHandler;
|
||||
import org.springframework.boot.context.properties.bind.BindResult;
|
||||
import org.springframework.boot.context.properties.bind.Bindable;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertyName;
|
||||
|
||||
/**
|
||||
* {@link ConfigurationPropertiesBindHandlerAdvisor} to detect nativeEncoding/Decoding settings
|
||||
* provided by the application explicitly.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.0
|
||||
*/
|
||||
public class EncodingDecodingBindAdviceHandler implements ConfigurationPropertiesBindHandlerAdvisor {
|
||||
|
||||
private boolean encodingSettingProvided;
|
||||
private boolean decodingSettingProvided;
|
||||
|
||||
public boolean isDecodingSettingProvided() {
|
||||
return decodingSettingProvided;
|
||||
}
|
||||
|
||||
public boolean isEncodingSettingProvided() {
|
||||
return this.encodingSettingProvided;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BindHandler apply(BindHandler bindHandler) {
|
||||
BindHandler handler = new AbstractBindHandler(bindHandler) {
|
||||
@Override
|
||||
public <T> Bindable<T> onStart(ConfigurationPropertyName name,
|
||||
Bindable<T> target, BindContext context) {
|
||||
final String configName = name.toString();
|
||||
if (configName.contains("use") && configName.contains("native") &&
|
||||
(configName.contains("encoding") || configName.contains("decoding"))) {
|
||||
BindResult<T> result = context.getBinder().bind(name, target);
|
||||
if (result.isBound()) {
|
||||
if (configName.contains("encoding")) {
|
||||
EncodingDecodingBindAdviceHandler.this.encodingSettingProvided = true;
|
||||
}
|
||||
else {
|
||||
EncodingDecodingBindAdviceHandler.this.decodingSettingProvided = true;
|
||||
}
|
||||
return target.withExistingValue(result.get());
|
||||
}
|
||||
}
|
||||
return bindHandler.onStart(name, target, context);
|
||||
}
|
||||
};
|
||||
return handler;
|
||||
}
|
||||
}
|
||||
@@ -1,119 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
|
||||
import org.springframework.cloud.stream.binder.AbstractBinder;
|
||||
import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.DefaultBinding;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* An {@link AbstractBinder} implementation for {@link GlobalKTable}.
|
||||
* <p>
|
||||
* Provides only consumer binding for the bound {@link GlobalKTable}. Output bindings are
|
||||
* not allowed on this binder.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 2.1.0
|
||||
*/
|
||||
public class GlobalKTableBinder extends
|
||||
// @checkstyle:off
|
||||
AbstractBinder<GlobalKTable<Object, Object>, ExtendedConsumerProperties<KafkaStreamsConsumerProperties>, ExtendedProducerProperties<KafkaStreamsProducerProperties>>
|
||||
implements
|
||||
ExtendedPropertiesBinder<GlobalKTable<Object, Object>, KafkaStreamsConsumerProperties, KafkaStreamsProducerProperties> {
|
||||
|
||||
// @checkstyle:on
|
||||
private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;
|
||||
|
||||
private final KafkaTopicProvisioner kafkaTopicProvisioner;
|
||||
|
||||
// @checkstyle:off
|
||||
private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties = new KafkaStreamsExtendedBindingProperties();
|
||||
|
||||
// @checkstyle:on
|
||||
|
||||
public GlobalKTableBinder(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner) {
|
||||
this.binderConfigurationProperties = binderConfigurationProperties;
|
||||
this.kafkaTopicProvisioner = kafkaTopicProvisioner;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
protected Binding<GlobalKTable<Object, Object>> doBindConsumer(String name,
|
||||
String group, GlobalKTable<Object, Object> inputTarget,
|
||||
ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
|
||||
if (!StringUtils.hasText(group)) {
|
||||
group = properties.getExtension().getApplicationId();
|
||||
}
|
||||
KafkaStreamsBinderUtils.prepareConsumerBinding(name, group,
|
||||
getApplicationContext(), this.kafkaTopicProvisioner,
|
||||
this.binderConfigurationProperties, properties);
|
||||
return new DefaultBinding<>(name, group, inputTarget, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Binding<GlobalKTable<Object, Object>> doBindProducer(String name,
|
||||
GlobalKTable<Object, Object> outboundBindTarget,
|
||||
ExtendedProducerProperties<KafkaStreamsProducerProperties> properties) {
|
||||
throw new UnsupportedOperationException(
|
||||
"No producer level binding is allowed for GlobalKTable");
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaStreamsConsumerProperties getExtendedConsumerProperties(
|
||||
String channelName) {
|
||||
return this.kafkaStreamsExtendedBindingProperties
|
||||
.getExtendedConsumerProperties(channelName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaStreamsProducerProperties getExtendedProducerProperties(
|
||||
String channelName) {
|
||||
throw new UnsupportedOperationException(
|
||||
"No producer binding is allowed and therefore no properties");
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDefaultsPrefix() {
|
||||
return this.kafkaStreamsExtendedBindingProperties.getDefaultsPrefix();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
|
||||
return this.kafkaStreamsExtendedBindingProperties
|
||||
.getExtendedPropertiesEntryClass();
|
||||
}
|
||||
|
||||
public void setKafkaStreamsExtendedBindingProperties(
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties) {
|
||||
this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,85 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Import;
|
||||
|
||||
/**
|
||||
* Configuration for GlobalKTable binder.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 2.1.0
|
||||
*/
|
||||
@Configuration
|
||||
@Import({ KafkaAutoConfiguration.class,
|
||||
KafkaStreamsBinderHealthIndicatorConfiguration.class })
|
||||
public class GlobalKTableBinderConfiguration {
|
||||
|
||||
@Bean
|
||||
public KafkaTopicProvisioner provisioningProvider(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties) {
|
||||
return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public GlobalKTableBinder GlobalKTableBinder(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
|
||||
@Qualifier("streamConfigGlobalProperties") Map<String, Object> streamConfigGlobalProperties) {
|
||||
GlobalKTableBinder globalKTableBinder = new GlobalKTableBinder(binderConfigurationProperties,
|
||||
kafkaTopicProvisioner);
|
||||
globalKTableBinder.setKafkaStreamsExtendedBindingProperties(
|
||||
kafkaStreamsExtendedBindingProperties);
|
||||
return globalKTableBinder;
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnBean(name = "outerContext")
|
||||
public static BeanFactoryPostProcessor outerContextBeanFactoryPostProcessor() {
|
||||
return beanFactory -> {
|
||||
|
||||
// It is safe to call getBean("outerContext") here, because this bean is
|
||||
// registered as first
|
||||
// and as independent from the parent context.
|
||||
ApplicationContext outerContext = (ApplicationContext) beanFactory
|
||||
.getBean("outerContext");
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsBinderConfigurationProperties.class.getSimpleName(),
|
||||
outerContext
|
||||
.getBean(KafkaStreamsBinderConfigurationProperties.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsExtendedBindingProperties.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsExtendedBindingProperties.class));
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,129 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.aopalliance.intercept.MethodInterceptor;
|
||||
import org.aopalliance.intercept.MethodInvocation;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
|
||||
import org.springframework.aop.framework.ProxyFactory;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
/**
|
||||
* {@link org.springframework.cloud.stream.binding.BindingTargetFactory} for
|
||||
* {@link GlobalKTable}
|
||||
*
|
||||
 * Only input bindings are created, since output bindings are not allowed on a GlobalKTable.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 2.1.0
|
||||
*/
|
||||
public class GlobalKTableBoundElementFactory
|
||||
extends AbstractBindingTargetFactory<GlobalKTable> {
|
||||
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
private final EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler;
|
||||
|
||||
GlobalKTableBoundElementFactory(BindingServiceProperties bindingServiceProperties,
|
||||
EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler) {
|
||||
super(GlobalKTable.class);
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.encodingDecodingBindAdviceHandler = encodingDecodingBindAdviceHandler;
|
||||
}
|
||||
|
||||
@Override
|
||||
public GlobalKTable createInput(String name) {
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties.getBindingProperties(name);
|
||||
ConsumerProperties consumerProperties = bindingProperties.getConsumer();
|
||||
if (consumerProperties == null) {
|
||||
consumerProperties = this.bindingServiceProperties.getConsumerProperties(name);
|
||||
consumerProperties.setUseNativeDecoding(true);
|
||||
}
|
||||
else {
|
||||
if (!encodingDecodingBindAdviceHandler.isDecodingSettingProvided()) {
|
||||
consumerProperties.setUseNativeDecoding(true);
|
||||
}
|
||||
}
|
||||
// Always set multiplex to true in the kafka streams binder
|
||||
consumerProperties.setMultiplex(true);
|
||||
|
||||
// @checkstyle:off
|
||||
GlobalKTableBoundElementFactory.GlobalKTableWrapperHandler wrapper = new GlobalKTableBoundElementFactory.GlobalKTableWrapperHandler();
|
||||
// @checkstyle:on
|
||||
ProxyFactory proxyFactory = new ProxyFactory(
|
||||
GlobalKTableBoundElementFactory.GlobalKTableWrapper.class,
|
||||
GlobalKTable.class);
|
||||
proxyFactory.addAdvice(wrapper);
|
||||
|
||||
return (GlobalKTable) proxyFactory.getProxy();
|
||||
}
|
||||
|
||||
@Override
|
||||
public GlobalKTable createOutput(String name) {
|
||||
throw new UnsupportedOperationException(
|
||||
"Outbound operations are not allowed on target type GlobalKTable");
|
||||
}
|
||||
|
||||
/**
|
||||
* Wrapper for GlobalKTable proxy.
|
||||
*/
|
||||
public interface GlobalKTableWrapper {
|
||||
|
||||
void wrap(GlobalKTable<Object, Object> delegate);
|
||||
|
||||
}
|
||||
|
||||
private static class GlobalKTableWrapperHandler implements
|
||||
GlobalKTableBoundElementFactory.GlobalKTableWrapper, MethodInterceptor {
|
||||
|
||||
private GlobalKTable<Object, Object> delegate;
|
||||
|
||||
public void wrap(GlobalKTable<Object, Object> delegate) {
|
||||
Assert.notNull(delegate, "delegate cannot be null");
|
||||
if (this.delegate == null) {
|
||||
this.delegate = delegate;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object invoke(MethodInvocation methodInvocation) throws Throwable {
|
||||
if (methodInvocation.getMethod().getDeclaringClass()
|
||||
.equals(GlobalKTable.class)) {
|
||||
Assert.notNull(this.delegate,
|
||||
"Trying to prepareConsumerBinding " + methodInvocation.getMethod()
|
||||
+ " but no delegate has been set.");
|
||||
return methodInvocation.getMethod().invoke(this.delegate,
|
||||
methodInvocation.getArguments());
|
||||
}
|
||||
else if (methodInvocation.getMethod().getDeclaringClass()
|
||||
.equals(GlobalKTableBoundElementFactory.GlobalKTableWrapper.class)) {
|
||||
return methodInvocation.getMethod().invoke(this,
|
||||
methodInvocation.getArguments());
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException(
|
||||
"Only GlobalKTable method invocations are permitted");
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,156 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serializer;
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.errors.InvalidStateStoreException;
|
||||
import org.apache.kafka.streams.state.HostInfo;
|
||||
import org.apache.kafka.streams.state.QueryableStoreType;
|
||||
import org.apache.kafka.streams.state.StreamsMetadata;
|
||||
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.retry.RetryPolicy;
|
||||
import org.springframework.retry.backoff.FixedBackOffPolicy;
|
||||
import org.springframework.retry.policy.SimpleRetryPolicy;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Services pertinent to the interactive query capabilities of Kafka Streams. This class
|
||||
* provides services such as querying for a particular store, which instance is hosting a
|
||||
* particular store etc. This is part of the public API of the kafka streams binder and
|
||||
* the users can inject this service in their applications to make use of it.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Renwei Han
|
||||
* @since 2.1.0
|
||||
*/
|
||||
public class InteractiveQueryService {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(InteractiveQueryService.class);
|
||||
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
|
||||
private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;
|
||||
|
||||
/**
|
||||
* Constructor for InteractiveQueryService.
|
||||
* @param kafkaStreamsRegistry holding {@link KafkaStreamsRegistry}
|
||||
* @param binderConfigurationProperties kafka Streams binder configuration properties
|
||||
*/
|
||||
public InteractiveQueryService(KafkaStreamsRegistry kafkaStreamsRegistry,
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
|
||||
this.kafkaStreamsRegistry = kafkaStreamsRegistry;
|
||||
this.binderConfigurationProperties = binderConfigurationProperties;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve and return a queryable store by name created in the application.
|
||||
* @param storeName name of the queryable store
|
||||
* @param storeType type of the queryable store
|
||||
* @param <T> generic queryable store
|
||||
* @return queryable store.
|
||||
*/
|
||||
public <T> T getQueryableStore(String storeName, QueryableStoreType<T> storeType) {
|
||||
|
||||
RetryTemplate retryTemplate = new RetryTemplate();
|
||||
|
||||
KafkaStreamsBinderConfigurationProperties.StateStoreRetry stateStoreRetry = this.binderConfigurationProperties.getStateStoreRetry();
|
||||
RetryPolicy retryPolicy = new SimpleRetryPolicy(stateStoreRetry.getMaxAttempts());
|
||||
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
|
||||
backOffPolicy.setBackOffPeriod(stateStoreRetry.getBackoffPeriod());
|
||||
|
||||
retryTemplate.setBackOffPolicy(backOffPolicy);
|
||||
retryTemplate.setRetryPolicy(retryPolicy);
|
||||
|
||||
return retryTemplate.execute(context -> {
|
||||
T store = null;
|
||||
|
||||
final Set<KafkaStreams> kafkaStreams = InteractiveQueryService.this.kafkaStreamsRegistry.getKafkaStreams();
|
||||
final Iterator<KafkaStreams> iterator = kafkaStreams.iterator();
|
||||
Throwable throwable = null;
|
||||
while (iterator.hasNext()) {
|
||||
try {
|
||||
store = iterator.next().store(storeName, storeType);
|
||||
}
|
||||
catch (InvalidStateStoreException e) {
|
||||
// pass through..
|
||||
throwable = e;
|
||||
}
|
||||
}
|
||||
if (store != null) {
|
||||
return store;
|
||||
}
|
||||
throw new IllegalStateException("Error when retrieving state store: j " + storeName, throwable);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the current {@link HostInfo} that the calling kafka streams application is
|
||||
* running on.
|
||||
*
|
||||
* Note that the end user applications must provide `applicaiton.server` as a
|
||||
* configuration property when calling this method. If this is not available, then
|
||||
* null is returned.
|
||||
* @return the current {@link HostInfo}
|
||||
*/
|
||||
public HostInfo getCurrentHostInfo() {
|
||||
Map<String, String> configuration = this.binderConfigurationProperties
|
||||
.getConfiguration();
|
||||
if (configuration.containsKey("application.server")) {
|
||||
|
||||
String applicationServer = configuration.get("application.server");
|
||||
String[] splits = StringUtils.split(applicationServer, ":");
|
||||
|
||||
return new HostInfo(splits[0], Integer.valueOf(splits[1]));
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the {@link HostInfo} where the provided store and key are hosted on. This may
|
||||
* not be the current host that is running the application. Kafka Streams will look
|
||||
* through all the consumer instances under the same application id and retrieves the
|
||||
* proper host.
|
||||
*
|
||||
* Note that the end user applications must provide `applicaiton.server` as a
|
||||
* configuration property for all the application instances when calling this method.
|
||||
* If this is not available, then null maybe returned.
|
||||
* @param <K> generic type for key
|
||||
* @param store store name
|
||||
* @param key key to look for
|
||||
* @param serializer {@link Serializer} for the key
|
||||
* @return the {@link HostInfo} where the key for the provided store is hosted
|
||||
* currently
|
||||
*/
|
||||
public <K> HostInfo getHostInfo(String store, K key, Serializer<K> serializer) {
|
||||
StreamsMetadata streamsMetadata = this.kafkaStreamsRegistry.getKafkaStreams()
|
||||
.stream()
|
||||
.map((k) -> Optional.ofNullable(k.metadataForKey(store, key, serializer)))
|
||||
.filter(Optional::isPresent).map(Optional::get).findFirst().orElse(null);
|
||||
return streamsMetadata != null ? streamsMetadata.hostInfo() : null;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,197 +0,0 @@
|
||||
/*
|
||||
* Copyright 2017-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.Produced;
|
||||
import org.apache.kafka.streams.processor.StreamPartitioner;
|
||||
|
||||
import org.springframework.aop.framework.Advised;
|
||||
import org.springframework.cloud.stream.binder.AbstractBinder;
|
||||
import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.DefaultBinding;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* {@link org.springframework.cloud.stream.binder.Binder} implementation for
|
||||
 * {@link KStream}. This implementation extends from the {@link AbstractBinder} directly.
|
||||
* <p>
|
||||
* Provides both producer and consumer bindings for the bound KStream.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KStreamBinder extends
|
||||
// @checkstyle:off
|
||||
AbstractBinder<KStream<Object, Object>, ExtendedConsumerProperties<KafkaStreamsConsumerProperties>, ExtendedProducerProperties<KafkaStreamsProducerProperties>>
|
||||
implements
|
||||
ExtendedPropertiesBinder<KStream<Object, Object>, KafkaStreamsConsumerProperties, KafkaStreamsProducerProperties> {
|
||||
|
||||
// @checkstyle:on
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(KStreamBinder.class);
|
||||
|
||||
private final KafkaTopicProvisioner kafkaTopicProvisioner;
|
||||
|
||||
// @checkstyle:off
|
||||
private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties = new KafkaStreamsExtendedBindingProperties();
|
||||
|
||||
// @checkstyle:on
|
||||
|
||||
private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;
|
||||
|
||||
private final KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private final KeyValueSerdeResolver keyValueSerdeResolver;
|
||||
|
||||
KStreamBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver) {
|
||||
this.binderConfigurationProperties = binderConfigurationProperties;
|
||||
this.kafkaTopicProvisioner = kafkaTopicProvisioner;
|
||||
this.kafkaStreamsMessageConversionDelegate = kafkaStreamsMessageConversionDelegate;
|
||||
this.kafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
|
||||
this.keyValueSerdeResolver = keyValueSerdeResolver;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Binding<KStream<Object, Object>> doBindConsumer(String name, String group,
|
||||
KStream<Object, Object> inputTarget,
|
||||
ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
|
||||
|
||||
KStream<Object, Object> delegate = ((KStreamBoundElementFactory.KStreamWrapperHandler)
|
||||
((Advised) inputTarget).getAdvisors()[0].getAdvice()).getDelegate();
|
||||
|
||||
this.kafkaStreamsBindingInformationCatalogue.registerConsumerProperties(delegate, properties.getExtension());
|
||||
|
||||
if (!StringUtils.hasText(group)) {
|
||||
group = properties.getExtension().getApplicationId();
|
||||
}
|
||||
KafkaStreamsBinderUtils.prepareConsumerBinding(name, group,
|
||||
getApplicationContext(), this.kafkaTopicProvisioner,
|
||||
this.binderConfigurationProperties, properties);
|
||||
|
||||
return new DefaultBinding<>(name, group, inputTarget, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
protected Binding<KStream<Object, Object>> doBindProducer(String name,
|
||||
KStream<Object, Object> outboundBindTarget,
|
||||
ExtendedProducerProperties<KafkaStreamsProducerProperties> properties) {
|
||||
|
||||
ExtendedProducerProperties<KafkaProducerProperties> extendedProducerProperties =
|
||||
(ExtendedProducerProperties) properties;
|
||||
|
||||
this.kafkaTopicProvisioner.provisionProducerDestination(name, extendedProducerProperties);
|
||||
Serde<?> keySerde = this.keyValueSerdeResolver
|
||||
.getOuboundKeySerde(properties.getExtension(), kafkaStreamsBindingInformationCatalogue.getOutboundKStreamResolvable());
|
||||
LOG.info("Key Serde used for (outbound) " + name + ": " + keySerde.getClass().getName());
|
||||
|
||||
Serde<?> valueSerde;
|
||||
if (properties.isUseNativeEncoding()) {
|
||||
valueSerde = this.keyValueSerdeResolver.getOutboundValueSerde(properties,
|
||||
properties.getExtension(), kafkaStreamsBindingInformationCatalogue.getOutboundKStreamResolvable());
|
||||
}
|
||||
else {
|
||||
valueSerde = Serdes.ByteArray();
|
||||
}
|
||||
LOG.info("Value Serde used for (outbound) " + name + ": " + valueSerde.getClass().getName());
|
||||
|
||||
to(properties.isUseNativeEncoding(), name, outboundBindTarget,
|
||||
(Serde<Object>) keySerde, (Serde<Object>) valueSerde, properties.getExtension());
|
||||
return new DefaultBinding<>(name, null, outboundBindTarget, null);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private void to(boolean isNativeEncoding, String name,
|
||||
KStream<Object, Object> outboundBindTarget, Serde<Object> keySerde,
|
||||
Serde<Object> valueSerde, KafkaStreamsProducerProperties properties) {
|
||||
final Produced<Object, Object> produced = Produced.with(keySerde, valueSerde);
|
||||
StreamPartitioner streamPartitioner = null;
|
||||
if (!StringUtils.isEmpty(properties.getStreamPartitionerBeanName())) {
|
||||
streamPartitioner = getApplicationContext().getBean(properties.getStreamPartitionerBeanName(),
|
||||
StreamPartitioner.class);
|
||||
}
|
||||
if (streamPartitioner != null) {
|
||||
produced.withStreamPartitioner(streamPartitioner);
|
||||
}
|
||||
if (!isNativeEncoding) {
|
||||
LOG.info("Native encoding is disabled for " + name
|
||||
+ ". Outbound message conversion done by Spring Cloud Stream.");
|
||||
outboundBindTarget.filter((k, v) -> v == null)
|
||||
.to(name, produced);
|
||||
this.kafkaStreamsMessageConversionDelegate
|
||||
.serializeOnOutbound(outboundBindTarget)
|
||||
.to(name, produced);
|
||||
}
|
||||
else {
|
||||
LOG.info("Native encoding is enabled for " + name
|
||||
+ ". Outbound serialization done at the broker.");
|
||||
outboundBindTarget.to(name, produced);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaStreamsConsumerProperties getExtendedConsumerProperties(
|
||||
String channelName) {
|
||||
return this.kafkaStreamsExtendedBindingProperties
|
||||
.getExtendedConsumerProperties(channelName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaStreamsProducerProperties getExtendedProducerProperties(
|
||||
String channelName) {
|
||||
return this.kafkaStreamsExtendedBindingProperties
|
||||
.getExtendedProducerProperties(channelName);
|
||||
}
|
||||
|
||||
public void setKafkaStreamsExtendedBindingProperties(
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties) {
|
||||
this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDefaultsPrefix() {
|
||||
return this.kafkaStreamsExtendedBindingProperties.getDefaultsPrefix();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
|
||||
return this.kafkaStreamsExtendedBindingProperties
|
||||
.getExtendedPropertiesEntryClass();
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,95 +0,0 @@
|
||||
/*
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Import;
|
||||
|
||||
/**
|
||||
* Configuration for KStream binder.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Gary Russell
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
@Configuration
|
||||
@Import({ KafkaAutoConfiguration.class,
|
||||
KafkaStreamsBinderHealthIndicatorConfiguration.class })
|
||||
public class KStreamBinderConfiguration {
|
||||
|
||||
@Bean
|
||||
public KafkaTopicProvisioner provisioningProvider(
|
||||
KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties) {
|
||||
return new KafkaTopicProvisioner(kafkaStreamsBinderConfigurationProperties,
|
||||
kafkaProperties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KStreamBinder kStreamBinder(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties) {
|
||||
KStreamBinder kStreamBinder = new KStreamBinder(binderConfigurationProperties,
|
||||
kafkaTopicProvisioner, KafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue, keyValueSerdeResolver);
|
||||
kStreamBinder.setKafkaStreamsExtendedBindingProperties(
|
||||
kafkaStreamsExtendedBindingProperties);
|
||||
return kStreamBinder;
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnBean(name = "outerContext")
|
||||
public static BeanFactoryPostProcessor outerContextBeanFactoryPostProcessor() {
|
||||
return beanFactory -> {
|
||||
|
||||
// It is safe to call getBean("outerContext") here, because this bean is
|
||||
// registered as first
|
||||
// and as independent from the parent context.
|
||||
ApplicationContext outerContext = (ApplicationContext) beanFactory
|
||||
.getBean("outerContext");
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsBinderConfigurationProperties.class.getSimpleName(),
|
||||
outerContext
|
||||
.getBean(KafkaStreamsBinderConfigurationProperties.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsMessageConversionDelegate.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsMessageConversionDelegate.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsBindingInformationCatalogue.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsBindingInformationCatalogue.class));
|
||||
beanFactory.registerSingleton(KeyValueSerdeResolver.class.getSimpleName(),
|
||||
outerContext.getBean(KeyValueSerdeResolver.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsExtendedBindingProperties.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsExtendedBindingProperties.class));
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,155 +0,0 @@
|
||||
/*
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.aopalliance.intercept.MethodInterceptor;
|
||||
import org.aopalliance.intercept.MethodInvocation;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.aop.framework.ProxyFactory;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ProducerProperties;
|
||||
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
/**
|
||||
* {@link org.springframework.cloud.stream.binding.BindingTargetFactory}
|
||||
* for{@link KStream}.
|
||||
*
|
||||
* The implementation creates proxies for both input and output binding. The actual target
|
||||
* will be created downstream through further binding process.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KStream> {
|
||||
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
private final EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler;
|
||||
|
||||
KStreamBoundElementFactory(BindingServiceProperties bindingServiceProperties,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler) {
|
||||
super(KStream.class);
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.kafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
|
||||
this.encodingDecodingBindAdviceHandler = encodingDecodingBindAdviceHandler;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KStream createInput(String name) {
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties.getBindingProperties(name);
|
||||
ConsumerProperties consumerProperties = bindingProperties.getConsumer();
|
||||
if (consumerProperties == null) {
|
||||
consumerProperties = this.bindingServiceProperties.getConsumerProperties(name);
|
||||
consumerProperties.setUseNativeDecoding(true);
|
||||
}
|
||||
else {
|
||||
if (!encodingDecodingBindAdviceHandler.isDecodingSettingProvided()) {
|
||||
consumerProperties.setUseNativeDecoding(true);
|
||||
}
|
||||
}
|
||||
// Always set multiplex to true in the kafka streams binder
|
||||
consumerProperties.setMultiplex(true);
|
||||
return createProxyForKStream(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public KStream createOutput(final String name) {
|
||||
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties.getBindingProperties(name);
|
||||
ProducerProperties producerProperties = bindingProperties.getProducer();
|
||||
if (producerProperties == null) {
|
||||
producerProperties = this.bindingServiceProperties.getProducerProperties(name);
|
||||
producerProperties.setUseNativeEncoding(true);
|
||||
}
|
||||
else {
|
||||
if (!encodingDecodingBindAdviceHandler.isEncodingSettingProvided()) {
|
||||
producerProperties.setUseNativeEncoding(true);
|
||||
}
|
||||
}
|
||||
return createProxyForKStream(name);
|
||||
}
|
||||
|
||||
private KStream createProxyForKStream(String name) {
|
||||
KStreamWrapperHandler wrapper = new KStreamWrapperHandler();
|
||||
ProxyFactory proxyFactory = new ProxyFactory(KStreamWrapper.class, KStream.class);
|
||||
proxyFactory.addAdvice(wrapper);
|
||||
|
||||
KStream proxy = (KStream) proxyFactory.getProxy();
|
||||
|
||||
// Add the binding properties to the catalogue for later retrieval during further
|
||||
// binding steps downstream.
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties
|
||||
.getBindingProperties(name);
|
||||
this.kafkaStreamsBindingInformationCatalogue.registerBindingProperties(proxy,
|
||||
bindingProperties);
|
||||
return proxy;
|
||||
}
|
||||
|
||||
/**
|
||||
* Wrapper object for KStream proxy.
|
||||
*/
|
||||
public interface KStreamWrapper {
|
||||
|
||||
void wrap(KStream<Object, Object> delegate);
|
||||
|
||||
}
|
||||
|
||||
static class KStreamWrapperHandler
|
||||
implements KStreamWrapper, MethodInterceptor {
|
||||
|
||||
private KStream<Object, Object> delegate;
|
||||
|
||||
public void wrap(KStream<Object, Object> delegate) {
|
||||
Assert.notNull(delegate, "delegate cannot be null");
|
||||
if (this.delegate == null) {
|
||||
this.delegate = delegate;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object invoke(MethodInvocation methodInvocation) throws Throwable {
|
||||
if (methodInvocation.getMethod().getDeclaringClass().equals(KStream.class)) {
|
||||
Assert.notNull(this.delegate,
|
||||
"Trying to prepareConsumerBinding " + methodInvocation.getMethod()
|
||||
+ " but no delegate has been set.");
|
||||
return methodInvocation.getMethod().invoke(this.delegate,
|
||||
methodInvocation.getArguments());
|
||||
}
|
||||
else if (methodInvocation.getMethod().getDeclaringClass()
|
||||
.equals(KStreamWrapper.class)) {
|
||||
return methodInvocation.getMethod().invoke(this,
|
||||
methodInvocation.getArguments());
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException(
|
||||
"Only KStream method invocations are permitted");
|
||||
}
|
||||
}
|
||||
|
||||
KStream<Object, Object> getDelegate() {
|
||||
return delegate;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
/*
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.cloud.stream.binding.StreamListenerParameterAdapter;
|
||||
import org.springframework.core.MethodParameter;
|
||||
import org.springframework.core.ResolvableType;
|
||||
|
||||
/**
|
||||
* {@link StreamListenerParameterAdapter} for KStream.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KStreamStreamListenerParameterAdapter
|
||||
implements StreamListenerParameterAdapter<KStream<?, ?>, KStream<?, ?>> {
|
||||
|
||||
private final KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
KStreamStreamListenerParameterAdapter(
|
||||
KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
|
||||
this.kafkaStreamsMessageConversionDelegate = kafkaStreamsMessageConversionDelegate;
|
||||
this.KafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean supports(Class bindingTargetType, MethodParameter methodParameter) {
|
||||
return KafkaStreamsBinderUtils.supportsKStream(methodParameter, bindingTargetType);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public KStream adapt(KStream<?, ?> bindingTarget, MethodParameter parameter) {
|
||||
ResolvableType resolvableType = ResolvableType.forMethodParameter(parameter);
|
||||
final Class<?> valueClass = (resolvableType.getGeneric(1).getRawClass() != null)
|
||||
? (resolvableType.getGeneric(1).getRawClass()) : Object.class;
|
||||
if (this.KafkaStreamsBindingInformationCatalogue
|
||||
.isUseNativeDecoding(bindingTarget)) {
|
||||
return bindingTarget;
|
||||
}
|
||||
else {
|
||||
return this.kafkaStreamsMessageConversionDelegate
|
||||
.deserializeOnInbound(valueClass, bindingTarget);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
/*
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
|
||||
|
||||
/**
|
||||
* {@link StreamListenerResultAdapter} for KStream.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KStreamStreamListenerResultAdapter implements
|
||||
StreamListenerResultAdapter<KStream, KStreamBoundElementFactory.KStreamWrapper> {
|
||||
|
||||
@Override
|
||||
public boolean supports(Class<?> resultType, Class<?> boundElement) {
|
||||
return KStream.class.isAssignableFrom(resultType)
|
||||
&& KStream.class.isAssignableFrom(boundElement);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public Closeable adapt(KStream streamListenerResult,
|
||||
KStreamBoundElementFactory.KStreamWrapper boundElement) {
|
||||
boundElement.wrap(streamListenerResult);
|
||||
return new NoOpCloseable();
|
||||
}
|
||||
|
||||
private static final class NoOpCloseable implements Closeable {
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,122 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.cloud.stream.binder.AbstractBinder;
|
||||
import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.DefaultBinding;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
 * {@link org.springframework.cloud.stream.binder.Binder} implementation for
 * {@link KTable}. This implementation extends from the {@link AbstractBinder} directly.
 *
 * Provides only consumer binding for the bound KTable as output bindings are not allowed
 * on it.
 *
 * @author Soby Chacko
 */
class KTableBinder extends
		// @checkstyle:off
		AbstractBinder<KTable<Object, Object>, ExtendedConsumerProperties<KafkaStreamsConsumerProperties>, ExtendedProducerProperties<KafkaStreamsProducerProperties>>
		implements
		ExtendedPropertiesBinder<KTable<Object, Object>, KafkaStreamsConsumerProperties, KafkaStreamsProducerProperties> {

	// @checkstyle:on

	// Binder-level Kafka Streams configuration (brokers, application id, etc.).
	private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;

	// Provisions (creates/validates) the Kafka topics backing the bindings.
	private final KafkaTopicProvisioner kafkaTopicProvisioner;

	// @checkstyle:off
	// Per-binding extended properties; replaced via the setter during configuration.
	private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties = new KafkaStreamsExtendedBindingProperties();

	// @checkstyle:on

	KTableBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
			KafkaTopicProvisioner kafkaTopicProvisioner) {
		this.binderConfigurationProperties = binderConfigurationProperties;
		this.kafkaTopicProvisioner = kafkaTopicProvisioner;
	}

	/**
	 * Binds the KTable input: falls back to the processor's application id when no
	 * group is supplied, provisions the topic(s), and returns the binding handle.
	 */
	@Override
	@SuppressWarnings("unchecked")
	protected Binding<KTable<Object, Object>> doBindConsumer(String name, String group,
			KTable<Object, Object> inputTarget,
			// @checkstyle:off
			ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
		// @checkstyle:on
		// The application id doubles as the consumer group when none is given.
		if (!StringUtils.hasText(group)) {
			group = properties.getExtension().getApplicationId();
		}
		KafkaStreamsBinderUtils.prepareConsumerBinding(name, group,
				getApplicationContext(), this.kafkaTopicProvisioner,
				this.binderConfigurationProperties, properties);
		return new DefaultBinding<>(name, group, inputTarget, null);
	}

	/**
	 * Producer bindings are not supported for KTable targets.
	 * @throws UnsupportedOperationException always
	 */
	@Override
	protected Binding<KTable<Object, Object>> doBindProducer(String name,
			KTable<Object, Object> outboundBindTarget,
			// @checkstyle:off
			ExtendedProducerProperties<KafkaStreamsProducerProperties> properties) {
		// @checkstyle:on
		throw new UnsupportedOperationException(
				"No producer level binding is allowed for KTable");
	}

	@Override
	public KafkaStreamsConsumerProperties getExtendedConsumerProperties(
			String channelName) {
		return this.kafkaStreamsExtendedBindingProperties
				.getExtendedConsumerProperties(channelName);
	}

	@Override
	public KafkaStreamsProducerProperties getExtendedProducerProperties(
			String channelName) {
		return this.kafkaStreamsExtendedBindingProperties
				.getExtendedProducerProperties(channelName);
	}

	@Override
	public String getDefaultsPrefix() {
		return this.kafkaStreamsExtendedBindingProperties.getDefaultsPrefix();
	}

	@Override
	public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
		return this.kafkaStreamsExtendedBindingProperties
				.getExtendedPropertiesEntryClass();
	}

	public void setKafkaStreamsExtendedBindingProperties(
			KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties) {
		this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
	}
}
|
||||
@@ -1,85 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Import;
|
||||
|
||||
/**
 * Configuration for KTable binder.
 *
 * @author Soby Chacko
 */
@SuppressWarnings("ALL")
@Configuration
@Import({ KafkaAutoConfiguration.class,
		KafkaStreamsBinderHealthIndicatorConfiguration.class })
public class KTableBinderConfiguration {

	/**
	 * Topic provisioner used by the binder to create/validate the backing topics.
	 */
	@Bean
	public KafkaTopicProvisioner provisioningProvider(
			KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
			KafkaProperties kafkaProperties) {
		return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
	}

	/**
	 * The {@link KTableBinder} bean for this binder context.
	 */
	@Bean
	public KTableBinder kTableBinder(
			KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
			KafkaTopicProvisioner kafkaTopicProvisioner,
			KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
			// NOTE(review): streamConfigGlobalProperties is injected but not used in
			// this method body — presumably kept to enforce bean-creation ordering;
			// confirm before removing.
			@Qualifier("streamConfigGlobalProperties") Map<String, Object> streamConfigGlobalProperties) {
		KTableBinder kTableBinder = new KTableBinder(binderConfigurationProperties,
				kafkaTopicProvisioner);
		kTableBinder.setKafkaStreamsExtendedBindingProperties(kafkaStreamsExtendedBindingProperties);
		return kTableBinder;
	}

	/**
	 * Copies the shared binder support beans from the parent ("outer") application
	 * context into this child binder context as singletons.
	 */
	@Bean
	@ConditionalOnBean(name = "outerContext")
	public static BeanFactoryPostProcessor outerContextBeanFactoryPostProcessor() {
		return beanFactory -> {

			// It is safe to call getBean("outerContext") here, because this bean is
			// registered as first
			// and as independent from the parent context.
			ApplicationContext outerContext = (ApplicationContext) beanFactory
					.getBean("outerContext");
			beanFactory.registerSingleton(
					KafkaStreamsBinderConfigurationProperties.class.getSimpleName(),
					outerContext
							.getBean(KafkaStreamsBinderConfigurationProperties.class));
			beanFactory.registerSingleton(
					KafkaStreamsExtendedBindingProperties.class.getSimpleName(),
					outerContext.getBean(KafkaStreamsExtendedBindingProperties.class));
		};
	}


}
|
||||
@@ -1,124 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.aopalliance.intercept.MethodInterceptor;
|
||||
import org.aopalliance.intercept.MethodInvocation;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.aop.framework.ProxyFactory;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
/**
 * {@link org.springframework.cloud.stream.binding.BindingTargetFactory} for
 * {@link KTable}
 *
 * Input bindings are only created as output bindings on KTable are not allowed.
 *
 * @author Soby Chacko
 */
class KTableBoundElementFactory extends AbstractBindingTargetFactory<KTable> {

	private final BindingServiceProperties bindingServiceProperties;

	// Tracks whether the application explicitly provided encoding/decoding settings.
	private final EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler;

	KTableBoundElementFactory(BindingServiceProperties bindingServiceProperties,
			EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler) {
		super(KTable.class);
		this.bindingServiceProperties = bindingServiceProperties;
		this.encodingDecodingBindAdviceHandler = encodingDecodingBindAdviceHandler;
	}

	/**
	 * Creates a {@link KTable} input proxy for the given binding. Defaults to
	 * native decoding unless the application explicitly provided a decoding setting.
	 */
	@Override
	public KTable createInput(String name) {
		BindingProperties bindingProperties = this.bindingServiceProperties.getBindingProperties(name);
		ConsumerProperties consumerProperties = bindingProperties.getConsumer();
		if (consumerProperties == null) {
			consumerProperties = this.bindingServiceProperties.getConsumerProperties(name);
			consumerProperties.setUseNativeDecoding(true);
		}
		else {
			if (!encodingDecodingBindAdviceHandler.isDecodingSettingProvided()) {
				consumerProperties.setUseNativeDecoding(true);
			}
		}
		// Always set multiplex to true in the kafka streams binder
		consumerProperties.setMultiplex(true);

		// The proxy forwards KTable calls to a delegate supplied later via wrap().
		KTableBoundElementFactory.KTableWrapperHandler wrapper = new KTableBoundElementFactory.KTableWrapperHandler();
		ProxyFactory proxyFactory = new ProxyFactory(
				KTableBoundElementFactory.KTableWrapper.class, KTable.class);
		proxyFactory.addAdvice(wrapper);

		return (KTable) proxyFactory.getProxy();
	}

	/**
	 * Output bindings are not supported for KTable targets.
	 * @throws UnsupportedOperationException always
	 */
	@Override
	@SuppressWarnings("unchecked")
	public KTable createOutput(final String name) {
		throw new UnsupportedOperationException(
				"Outbound operations are not allowed on target type KTable");
	}

	/**
	 * Wrapper for KTable proxy.
	 */
	public interface KTableWrapper {

		void wrap(KTable<Object, Object> delegate);

	}

	/**
	 * Interceptor that forwards {@link KTable} invocations to the delegate set via
	 * {@link KTableWrapper#wrap(KTable)}. The delegate is set at most once.
	 */
	private static class KTableWrapperHandler
			implements KTableBoundElementFactory.KTableWrapper, MethodInterceptor {

		private KTable<Object, Object> delegate;

		public void wrap(KTable<Object, Object> delegate) {
			Assert.notNull(delegate, "delegate cannot be null");
			// Only the first wrap call takes effect; later calls are ignored.
			if (this.delegate == null) {
				this.delegate = delegate;
			}
		}

		@Override
		public Object invoke(MethodInvocation methodInvocation) throws Throwable {
			if (methodInvocation.getMethod().getDeclaringClass().equals(KTable.class)) {
				Assert.notNull(this.delegate,
						"Trying to prepareConsumerBinding " + methodInvocation.getMethod()
								+ " but no delegate has been set.");
				return methodInvocation.getMethod().invoke(this.delegate,
						methodInvocation.getArguments());
			}
			else if (methodInvocation.getMethod().getDeclaringClass()
					.equals(KTableBoundElementFactory.KTableWrapper.class)) {
				return methodInvocation.getMethod().invoke(this,
						methodInvocation.getArguments());
			}
			else {
				throw new IllegalStateException(
						"Only KTable method invocations are permitted");
			}
		}

	}

}
|
||||
@@ -1,172 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.admin.AdminClient;
|
||||
import org.apache.kafka.clients.admin.ListTopicsResult;
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.processor.TaskMetadata;
|
||||
import org.apache.kafka.streams.processor.ThreadMetadata;
|
||||
|
||||
import org.springframework.boot.actuate.health.AbstractHealthIndicator;
|
||||
import org.springframework.boot.actuate.health.Health;
|
||||
import org.springframework.boot.actuate.health.Status;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
|
||||
/**
|
||||
* Health indicator for Kafka Streams.
|
||||
*
|
||||
* @author Arnaud Jardiné
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KafkaStreamsBinderHealthIndicator extends AbstractHealthIndicator {
|
||||
|
||||
private final Log logger = LogFactory.getLog(getClass());
|
||||
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
private final KafkaStreamsBinderConfigurationProperties configurationProperties;
|
||||
|
||||
private final Map<String, Object> adminClientProperties;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private static final ThreadLocal<Status> healthStatusThreadLocal = new ThreadLocal<>();
|
||||
|
||||
private AdminClient adminClient;
|
||||
|
||||
/**
 * Creates the health indicator.
 * @param kafkaStreamsRegistry registry of the running Kafka Streams instances
 * @param kafkaStreamsBinderConfigurationProperties binder-level configuration
 * @param kafkaProperties Spring Boot Kafka properties used to build the admin
 * client configuration
 * @param kafkaStreamsBindingInformationCatalogue catalogue of established bindings
 */
KafkaStreamsBinderHealthIndicator(KafkaStreamsRegistry kafkaStreamsRegistry,
		KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
		KafkaProperties kafkaProperties,
		KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue) {
	super("Kafka-streams health check failed");
	this.configurationProperties = kafkaStreamsBinderConfigurationProperties;
	// Fix: removed a stray leading kafkaProperties.buildAdminProperties() call
	// whose result was discarded — the map was being built twice for no effect.
	this.adminClientProperties = kafkaProperties.buildAdminProperties();
	// Merge the binder-level broker settings into the admin client configuration.
	KafkaTopicProvisioner.normalalizeBootPropsWithBinder(this.adminClientProperties, kafkaProperties,
			kafkaStreamsBinderConfigurationProperties);
	this.kafkaStreamsRegistry = kafkaStreamsRegistry;
	this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
}
|
||||
|
||||
@Override
protected void doHealthCheck(Health.Builder builder) throws Exception {
	try {
		initAdminClient();
		synchronized (this.adminClient) {
			final Status status = healthStatusThreadLocal.get();
			// If one of the kafka streams binders (kstream, ktable, globalktable) was
			// down before on the same request, retrieve that from the thread-local
			// storage where it was saved before. This is done in order to avoid
			// extending the duration of the total health check, since in the case of
			// Kafka Streams each binder tries to do its own health check and since we
			// already know that this is DOWN, simply pass that information along.
			// NOTE(review): the thread-local DOWN status is set in the catch block
			// below but apparently never cleared, so a later check on the same thread
			// may report DOWN even after the broker recovers — TODO confirm.
			if (status == Status.DOWN) {
				builder.withDetail("No topic information available", "Kafka broker is not reachable");
				builder.status(Status.DOWN);
			}
			else {
				// Probe broker reachability by listing topics within the configured timeout.
				final ListTopicsResult listTopicsResult = this.adminClient.listTopics();
				listTopicsResult.listings().get(this.configurationProperties.getHealthTimeout(), TimeUnit.SECONDS);

				if (this.kafkaStreamsBindingInformationCatalogue.getStreamsBuilderFactoryBeans().isEmpty()) {
					// No processors detected: health is UNKNOWN rather than DOWN.
					builder.withDetail("No Kafka Streams bindings have been established", "Kafka Streams binder did not detect any processors");
					builder.status(Status.UNKNOWN);
				}
				else {
					// UP only if every registered Kafka Streams instance is running.
					boolean up = true;
					for (KafkaStreams kStream : kafkaStreamsRegistry.getKafkaStreams()) {
						up &= kStream.state().isRunning();
						builder.withDetails(buildDetails(kStream));
					}
					builder.status(up ? Status.UP : Status.DOWN);
				}
			}
		}
	}
	catch (Exception e) {
		builder.withDetail("No topic information available", "Kafka broker is not reachable");
		builder.status(Status.DOWN);
		builder.withException(e);
		// Store binder down status into a thread local storage.
		healthStatusThreadLocal.set(Status.DOWN);
	}
	finally {
		// Close admin client immediately.
		// NOTE(review): the adminClient field is closed but not set to null, and
		// initAdminClient() only creates a new client when the field is null — a
		// subsequent health check would seem to reuse a closed client; verify.
		if (adminClient != null) {
			adminClient.close(Duration.ofSeconds(0));
		}
	}
}
|
||||
|
||||
private synchronized AdminClient initAdminClient() {
|
||||
if (this.adminClient == null) {
|
||||
this.adminClient = AdminClient.create(this.adminClientProperties);
|
||||
}
|
||||
return this.adminClient;
|
||||
}
|
||||
|
||||
private Map<String, Object> buildDetails(KafkaStreams kafkaStreams) {
|
||||
final Map<String, Object> details = new HashMap<>();
|
||||
final Map<String, Object> perAppdIdDetails = new HashMap<>();
|
||||
|
||||
if (kafkaStreams.state().isRunning()) {
|
||||
for (ThreadMetadata metadata : kafkaStreams.localThreadsMetadata()) {
|
||||
perAppdIdDetails.put("threadName", metadata.threadName());
|
||||
perAppdIdDetails.put("threadState", metadata.threadState());
|
||||
perAppdIdDetails.put("adminClientId", metadata.adminClientId());
|
||||
perAppdIdDetails.put("consumerClientId", metadata.consumerClientId());
|
||||
perAppdIdDetails.put("restoreConsumerClientId", metadata.restoreConsumerClientId());
|
||||
perAppdIdDetails.put("producerClientIds", metadata.producerClientIds());
|
||||
perAppdIdDetails.put("activeTasks", taskDetails(metadata.activeTasks()));
|
||||
perAppdIdDetails.put("standbyTasks", taskDetails(metadata.standbyTasks()));
|
||||
}
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsRegistry.streamBuilderFactoryBean(kafkaStreams);
|
||||
final String applicationId = (String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
details.put(applicationId, perAppdIdDetails);
|
||||
}
|
||||
else {
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsRegistry.streamBuilderFactoryBean(kafkaStreams);
|
||||
final String applicationId = (String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
details.put(applicationId, String.format("The processor with application.id %s is down", applicationId));
|
||||
}
|
||||
return details;
|
||||
}
|
||||
|
||||
private static Map<String, Object> taskDetails(Set<TaskMetadata> taskMetadata) {
|
||||
final Map<String, Object> details = new HashMap<>();
|
||||
for (TaskMetadata metadata : taskMetadata) {
|
||||
details.put("taskId", metadata.taskId());
|
||||
details.put("partitions",
|
||||
metadata.topicPartitions().stream().map(
|
||||
p -> "partition=" + p.partition() + ", topic=" + p.topic())
|
||||
.collect(Collectors.toList()));
|
||||
}
|
||||
return details;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.springframework.boot.actuate.autoconfigure.health.ConditionalOnEnabledHealthIndicator;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
/**
 * Configuration class for Kafka-streams binder health indicator beans.
 *
 * <p>Active only when Spring Boot actuator's {@code HealthIndicator} is on the
 * classpath and the {@code binders} health indicator is enabled.
 *
 * @author Arnaud Jardiné
 */
@Configuration
@ConditionalOnClass(name = "org.springframework.boot.actuate.health.HealthIndicator")
@ConditionalOnEnabledHealthIndicator("binders")
class KafkaStreamsBinderHealthIndicatorConfiguration {

    /**
     * Health indicator for the Kafka Streams binder; only registered when a
     * {@link KafkaStreamsRegistry} bean is present in the context.
     */
    @Bean
    @ConditionalOnBean(KafkaStreamsRegistry.class)
    KafkaStreamsBinderHealthIndicator kafkaStreamsBinderHealthIndicator(
            KafkaStreamsRegistry kafkaStreamsRegistry, KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
            KafkaProperties kafkaProperties, KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue) {

        return new KafkaStreamsBinderHealthIndicator(kafkaStreamsRegistry, kafkaStreamsBinderConfigurationProperties,
                kafkaProperties, kafkaStreamsBindingInformationCatalogue);
    }

}
|
||||
@@ -1,113 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.function.ToDoubleFunction;
|
||||
|
||||
import io.micrometer.core.instrument.Gauge;
|
||||
import io.micrometer.core.instrument.MeterRegistry;
|
||||
import io.micrometer.core.instrument.binder.MeterBinder;
|
||||
import org.apache.kafka.common.Metric;
|
||||
import org.apache.kafka.common.MetricName;
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
|
||||
/**
|
||||
* Kafka Streams binder metrics implementation that exports the metrics available
|
||||
* through {@link KafkaStreams#metrics()} into a micrometer {@link io.micrometer.core.instrument.MeterRegistry}.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.0
|
||||
*/
|
||||
public class KafkaStreamsBinderMetrics {
|
||||
|
||||
private final MeterRegistry meterRegistry;
|
||||
|
||||
private MeterBinder meterBinder;
|
||||
|
||||
public KafkaStreamsBinderMetrics(MeterRegistry meterRegistry) {
|
||||
this.meterRegistry = meterRegistry;
|
||||
}
|
||||
|
||||
public void bindTo(Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans, MeterRegistry meterRegistry) {
|
||||
|
||||
if (this.meterBinder == null) {
|
||||
this.meterBinder = new MeterBinder() {
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public void bindTo(MeterRegistry registry) {
|
||||
if (streamsBuilderFactoryBeans != null) {
|
||||
for (StreamsBuilderFactoryBean streamsBuilderFactoryBean : streamsBuilderFactoryBeans) {
|
||||
KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
|
||||
final Map<MetricName, ? extends Metric> metrics = kafkaStreams.metrics();
|
||||
|
||||
Set<String> meterNames = new HashSet<>();
|
||||
|
||||
for (Map.Entry<MetricName, ? extends Metric> metric : metrics.entrySet()) {
|
||||
final String sanitized = sanitize(metric.getKey().group() + "." + metric.getKey().name());
|
||||
final String applicationId = streamsBuilderFactoryBean.getStreamsConfiguration().getProperty(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
|
||||
final String name = streamsBuilderFactoryBeans.size() > 1 ? applicationId + "." + sanitized : sanitized;
|
||||
|
||||
final Gauge.Builder<KafkaStreamsBinderMetrics> builder =
|
||||
Gauge.builder(name, this,
|
||||
toDoubleFunction(metric.getValue()));
|
||||
final Map<String, String> tags = metric.getKey().tags();
|
||||
for (Map.Entry<String, String> tag : tags.entrySet()) {
|
||||
builder.tag(tag.getKey(), tag.getValue());
|
||||
}
|
||||
if (!meterNames.contains(name)) {
|
||||
builder.description(metric.getKey().description())
|
||||
.register(meterRegistry);
|
||||
meterNames.add(name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
ToDoubleFunction toDoubleFunction(Metric metric) {
|
||||
return (o) -> {
|
||||
if (metric.metricValue() instanceof Number) {
|
||||
return (Double) metric.metricValue();
|
||||
}
|
||||
else {
|
||||
return 0.0;
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
this.meterBinder.bindTo(this.meterRegistry);
|
||||
}
|
||||
|
||||
private static String sanitize(String value) {
|
||||
return value.replaceAll("-", ".");
|
||||
}
|
||||
|
||||
public void addMetrics(Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans) {
|
||||
synchronized (KafkaStreamsBinderMetrics.this) {
|
||||
this.bindTo(streamsBuilderFactoryBeans, this.meterRegistry);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,417 +0,0 @@
|
||||
/*
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import io.micrometer.core.instrument.MeterRegistry;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;
|
||||
import org.apache.kafka.streams.errors.LogAndFailExceptionHandler;
|
||||
|
||||
import org.springframework.beans.factory.ObjectProvider;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.BinderConfiguration;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.function.FunctionDetectorCondition;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.serde.CompositeNonNativeSerde;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.serde.MessageConverterDelegateSerde;
|
||||
import org.springframework.cloud.stream.binding.BindingService;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
|
||||
import org.springframework.cloud.stream.config.BinderProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceConfiguration;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.cloud.stream.function.StreamFunctionProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Conditional;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.core.env.ConfigurableEnvironment;
|
||||
import org.springframework.core.env.Environment;
|
||||
import org.springframework.core.env.MapPropertySource;
|
||||
import org.springframework.integration.context.IntegrationContextUtils;
|
||||
import org.springframework.kafka.config.KafkaStreamsConfiguration;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBeanCustomizer;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.kafka.streams.RecoveringDeserializationExceptionHandler;
|
||||
import org.springframework.lang.Nullable;
|
||||
import org.springframework.messaging.converter.CompositeMessageConverter;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
 * Kafka Streams binder configuration.
 *
 * @author Marius Bogoevici
 * @author Soby Chacko
 * @author Gary Russell
 */
@Configuration
@EnableConfigurationProperties(KafkaStreamsExtendedBindingProperties.class)
@ConditionalOnBean(BindingService.class)
@AutoConfigureAfter(BindingServiceConfiguration.class)
public class KafkaStreamsBinderSupportAutoConfiguration {

    // Binder type names as they appear in spring.cloud.stream.binders.<name>.type.
    private static final String KSTREAM_BINDER_TYPE = "kstream";

    private static final String KTABLE_BINDER_TYPE = "ktable";

    private static final String GLOBALKTABLE_BINDER_TYPE = "globalktable";
||||
/**
 * Binds {@code spring.cloud.stream.kafka.streams.binder.*} properties and, for
 * every declared binder of type kstream/ktable/globalktable, flattens that
 * binder's environment map into dotted property names and registers them as the
 * highest-precedence property source so the binder configuration sees them.
 */
@Bean
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.streams.binder")
public KafkaStreamsBinderConfigurationProperties binderConfigurationProperties(
        KafkaProperties kafkaProperties, ConfigurableEnvironment environment,
        BindingServiceProperties properties) {
    final Map<String, BinderConfiguration> binderConfigurations = getBinderConfigurations(
            properties);
    for (Map.Entry<String, BinderConfiguration> entry : binderConfigurations
            .entrySet()) {
        final BinderConfiguration binderConfiguration = entry.getValue();
        final String binderType = binderConfiguration.getBinderType();
        if (binderType != null && (binderType.equals(KSTREAM_BINDER_TYPE)
                || binderType.equals(KTABLE_BINDER_TYPE)
                || binderType.equals(GLOBALKTABLE_BINDER_TYPE))) {
            Map<String, Object> binderProperties = new HashMap<>();
            // Flatten the nested environment map into dotted keys (a.b.c=value).
            this.flatten(null, binderConfiguration.getProperties(), binderProperties);
            environment.getPropertySources().addFirst(
                    new MapPropertySource("kafkaStreamsBinderEnv", binderProperties));
        }
    }
    return new KafkaStreamsBinderConfigurationProperties(kafkaProperties);
}
|
||||
|
||||
// TODO: Lifted from core - good candidate for exposing as a utility method in core.
|
||||
private static Map<String, BinderConfiguration> getBinderConfigurations(
|
||||
BindingServiceProperties properties) {
|
||||
|
||||
Map<String, BinderConfiguration> binderConfigurations = new HashMap<>();
|
||||
Map<String, BinderProperties> declaredBinders = properties.getBinders();
|
||||
|
||||
for (Map.Entry<String, BinderProperties> binderEntry : declaredBinders
|
||||
.entrySet()) {
|
||||
BinderProperties binderProperties = binderEntry.getValue();
|
||||
binderConfigurations.put(binderEntry.getKey(),
|
||||
new BinderConfiguration(binderProperties.getType(),
|
||||
binderProperties.getEnvironment(),
|
||||
binderProperties.isInheritEnvironment(),
|
||||
binderProperties.isDefaultCandidate()));
|
||||
}
|
||||
return binderConfigurations;
|
||||
}
|
||||
|
||||
// TODO: Lifted from core - good candidate for exposing as a utility method in core.
|
||||
@SuppressWarnings("unchecked")
|
||||
private void flatten(String propertyName, Object value,
|
||||
Map<String, Object> flattenedProperties) {
|
||||
if (value instanceof Map) {
|
||||
((Map<Object, Object>) value).forEach((k, v) -> flatten(
|
||||
(propertyName != null ? propertyName + "." : "") + k, v,
|
||||
flattenedProperties));
|
||||
}
|
||||
else {
|
||||
flattenedProperties.put(propertyName, value.toString());
|
||||
}
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KafkaStreamsConfiguration kafkaStreamsConfiguration(
|
||||
KafkaStreamsBinderConfigurationProperties properties,
|
||||
Environment environment) {
|
||||
KafkaProperties kafkaProperties = properties.getKafkaProperties();
|
||||
Map<String, Object> streamsProperties = kafkaProperties.buildStreamsProperties();
|
||||
if (kafkaProperties.getStreams().getApplicationId() == null) {
|
||||
String applicationName = environment.getProperty("spring.application.name");
|
||||
if (applicationName != null) {
|
||||
streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG,
|
||||
applicationName);
|
||||
}
|
||||
}
|
||||
return new KafkaStreamsConfiguration(streamsProperties);
|
||||
}
|
||||
|
||||
/**
 * Assembles the global Kafka Streams configuration map shared by all processors.
 *
 * <p>Resolution order: start from {@link KafkaStreamsConfiguration}; resolve the
 * broker list (streams-binder property, then kafka-binder property, then the
 * {@code localhost:9092} default); apply the binder-provided application id;
 * force byte-array default serdes; map the configured serde error strategy to a
 * deserialization exception handler; finally overlay any explicit binder
 * configuration entries, which win over everything set before.
 */
@Bean("streamConfigGlobalProperties")
public Map<String, Object> streamConfigGlobalProperties(
        KafkaStreamsBinderConfigurationProperties configProperties,
        KafkaStreamsConfiguration kafkaStreamsConfiguration, ConfigurableEnvironment environment,
        SendToDlqAndContinue sendToDlqAndContinue) {

    Properties properties = kafkaStreamsConfiguration.asProperties();

    String kafkaConnectionString = configProperties.getKafkaConnectionString();

    if (kafkaConnectionString != null && kafkaConnectionString.equals("localhost:9092")) {
        //Making sure that the application indeed set a property.
        String kafkaStreamsBinderBroker = environment.getProperty("spring.cloud.stream.kafka.streams.binder.brokers");

        if (StringUtils.isEmpty(kafkaStreamsBinderBroker)) {
            //Kafka Streams binder specific property for brokers is not set by the application.
            //See if there is one configured at the kafka binder level.
            String kafkaBinderBroker = environment.getProperty("spring.cloud.stream.kafka.binder.brokers");
            if (!StringUtils.isEmpty(kafkaBinderBroker)) {
                kafkaConnectionString = kafkaBinderBroker;
                configProperties.setBrokers(kafkaConnectionString);
            }
        }
    }

    // Only override bootstrap.servers when it is unset or still the localhost default.
    if (ObjectUtils.isEmpty(properties.get(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG))) {
        properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
                kafkaConnectionString);
    }
    else {
        Object bootstrapServerConfig = properties
                .get(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG);
        if (bootstrapServerConfig instanceof String) {
            @SuppressWarnings("unchecked")
            String bootStrapServers = (String) properties
                    .get(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG);
            if (bootStrapServers.equals("localhost:9092")) {
                properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
                        kafkaConnectionString);
            }
        }
        else if (bootstrapServerConfig instanceof List) {
            // Boot may supply the broker list as a single-element List.
            List bootStrapCollection = (List) bootstrapServerConfig;
            if (bootStrapCollection.size() == 1 && bootStrapCollection.get(0).equals("localhost:9092")) {
                properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
                        kafkaConnectionString);
            }
        }
    }

    String binderProvidedApplicationId = configProperties.getApplicationId();
    if (StringUtils.hasText(binderProvidedApplicationId)) {
        properties.put(StreamsConfig.APPLICATION_ID_CONFIG,
                binderProvidedApplicationId);
    }

    // The binder performs its own (de)serialization, so raw bytes are the default.
    properties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG,
            Serdes.ByteArraySerde.class.getName());
    properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG,
            Serdes.ByteArraySerde.class.getName());

    // Map the configured serde error strategy onto a deserialization exception handler.
    if (configProperties
            .getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndContinue) {
        properties.put(
                StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                LogAndContinueExceptionHandler.class);
    }
    else if (configProperties
            .getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndFail) {
        properties.put(
                StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                LogAndFailExceptionHandler.class);
    }
    else if (configProperties
            .getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.sendToDlq) {
        properties.put(
                StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                RecoveringDeserializationExceptionHandler.class);
        properties.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER, sendToDlqAndContinue);
    }

    // Explicit binder configuration entries win over everything set above.
    if (!ObjectUtils.isEmpty(configProperties.getConfiguration())) {
        properties.putAll(configProperties.getConfiguration());
    }
    return properties.entrySet().stream().collect(
            Collectors.toMap((e) -> String.valueOf(e.getKey()), Map.Entry::getValue));
}
|
||||
|
||||
/**
 * Registers the result adapter for {@code StreamListener} methods used by the
 * KStream binder.
 */
@Bean
public KStreamStreamListenerResultAdapter kstreamStreamListenerResultAdapter() {
    return new KStreamStreamListenerResultAdapter();
}
|
||||
|
||||
@Bean
|
||||
public KStreamStreamListenerParameterAdapter kstreamStreamListenerParameterAdapter(
|
||||
KafkaStreamsMessageConversionDelegate kstreamBoundMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
|
||||
return new KStreamStreamListenerParameterAdapter(
|
||||
kstreamBoundMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue);
|
||||
}
|
||||
|
||||
/**
 * Registers the orchestrator that performs the binding setup for
 * {@code StreamListener} methods, wiring serde resolution, parameter/result
 * adapters and optional cleanup/customizer beans.
 */
@Bean
public KafkaStreamsStreamListenerSetupMethodOrchestrator kafkaStreamsStreamListenerSetupMethodOrchestrator(
        BindingServiceProperties bindingServiceProperties,
        KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
        KeyValueSerdeResolver keyValueSerdeResolver,
        KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
        KStreamStreamListenerParameterAdapter kafkaStreamListenerParameterAdapter,
        Collection<StreamListenerResultAdapter> streamListenerResultAdapters,
        ObjectProvider<CleanupConfig> cleanupConfig,
        ObjectProvider<StreamsBuilderFactoryBeanCustomizer> customizerProvider) {
    // getIfUnique(): the optional beans are only applied when exactly one exists.
    return new KafkaStreamsStreamListenerSetupMethodOrchestrator(
            bindingServiceProperties, kafkaStreamsExtendedBindingProperties,
            keyValueSerdeResolver, kafkaStreamsBindingInformationCatalogue,
            kafkaStreamListenerParameterAdapter, streamListenerResultAdapters,
            cleanupConfig.getIfUnique(), customizerProvider.getIfUnique());
}
|
||||
|
||||
@Bean
|
||||
public KafkaStreamsMessageConversionDelegate messageConversionDelegate(
|
||||
@Qualifier(IntegrationContextUtils.ARGUMENT_RESOLVER_MESSAGE_CONVERTER_BEAN_NAME)
|
||||
CompositeMessageConverter compositeMessageConverter,
|
||||
SendToDlqAndContinue sendToDlqAndContinue,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
|
||||
return new KafkaStreamsMessageConversionDelegate(compositeMessageConverter, sendToDlqAndContinue,
|
||||
KafkaStreamsBindingInformationCatalogue, binderConfigurationProperties);
|
||||
}
|
||||
|
||||
/**
 * Registers the serde that delegates (de)serialization to the
 * argument-resolver composite message converter.
 */
@Bean
public MessageConverterDelegateSerde messageConverterDelegateSerde(
        @Qualifier(IntegrationContextUtils.ARGUMENT_RESOLVER_MESSAGE_CONVERTER_BEAN_NAME)
        CompositeMessageConverter compositeMessageConverterFactory) {
    return new MessageConverterDelegateSerde(compositeMessageConverterFactory);
}
|
||||
|
||||
/**
 * Registers the composite non-native serde backed by the argument-resolver
 * composite message converter.
 */
@Bean
public CompositeNonNativeSerde compositeNonNativeSerde(
        @Qualifier(IntegrationContextUtils.ARGUMENT_RESOLVER_MESSAGE_CONVERTER_BEAN_NAME)
        CompositeMessageConverter compositeMessageConverterFactory) {
    return new CompositeNonNativeSerde(compositeMessageConverterFactory);
}
|
||||
|
||||
@Bean
|
||||
public KStreamBoundElementFactory kStreamBoundElementFactory(
|
||||
BindingServiceProperties bindingServiceProperties,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler) {
|
||||
return new KStreamBoundElementFactory(bindingServiceProperties,
|
||||
KafkaStreamsBindingInformationCatalogue, encodingDecodingBindAdviceHandler);
|
||||
}
|
||||
|
||||
/**
 * Registers the bound-element factory that creates {@code KTable} binding targets.
 */
@Bean
public KTableBoundElementFactory kTableBoundElementFactory(
        BindingServiceProperties bindingServiceProperties, EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler) {
    return new KTableBoundElementFactory(bindingServiceProperties, encodingDecodingBindAdviceHandler);
}
|
||||
|
||||
/**
 * Registers the bound-element factory that creates {@code GlobalKTable} binding targets.
 */
@Bean
public GlobalKTableBoundElementFactory globalKTableBoundElementFactory(
        BindingServiceProperties properties, EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler) {
    return new GlobalKTableBoundElementFactory(properties, encodingDecodingBindAdviceHandler);
}
|
||||
|
||||
/**
 * Registers the DLQ recoverer used by the {@code sendToDlq} serde error strategy.
 */
@Bean
public SendToDlqAndContinue sendToDlqAndContinue() {
    return new SendToDlqAndContinue();
}
|
||||
|
||||
/**
 * Registers the catalogue tracking binding information for the Kafka Streams binders.
 */
@Bean
public KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue() {
    return new KafkaStreamsBindingInformationCatalogue();
}
|
||||
|
||||
/**
 * Registers the key/value serde resolver, seeded with the global stream
 * configuration map built by {@code streamConfigGlobalProperties}.
 */
@Bean
@SuppressWarnings("unchecked")
@ConditionalOnMissingBean
public KeyValueSerdeResolver keyValueSerdeResolver(
        @Qualifier("streamConfigGlobalProperties") Object streamConfigGlobalProperties,
        KafkaStreamsBinderConfigurationProperties properties) {
    // Injected as Object and cast; the qualified bean is a Map<String, Object>.
    return new KeyValueSerdeResolver(
            (Map<String, Object>) streamConfigGlobalProperties, properties);
}
|
||||
|
||||
/**
 * Registers the interactive query support service backed by the Kafka Streams registry.
 */
@Bean
public InteractiveQueryService interactiveQueryServices(
        KafkaStreamsRegistry kafkaStreamsRegistry,
        KafkaStreamsBinderConfigurationProperties properties) {
    return new InteractiveQueryService(kafkaStreamsRegistry, properties);
}
|
||||
|
||||
/**
 * Registers the registry tracking all {@code KafkaStreams} instances managed by the binder.
 */
@Bean
public KafkaStreamsRegistry kafkaStreamsRegistry() {
    return new KafkaStreamsRegistry();
}
|
||||
|
||||
/**
 * Registers the manager coordinating the lifecycle of the binder's
 * {@code StreamsBuilderFactoryBean}s; metrics support is optional and may be null.
 */
@Bean
public StreamsBuilderFactoryManager streamsBuilderFactoryManager(
        KafkaStreamsBindingInformationCatalogue catalogue,
        KafkaStreamsRegistry kafkaStreamsRegistry,
        @Nullable KafkaStreamsBinderMetrics kafkaStreamsBinderMetrics) {
    return new StreamsBuilderFactoryManager(catalogue, kafkaStreamsRegistry, kafkaStreamsBinderMetrics);
}
|
||||
|
||||
/**
 * Registers the processor handling functional-style (java.util.function based)
 * Kafka Streams bindings; only active when the {@link FunctionDetectorCondition} matches.
 */
@Bean
@Conditional(FunctionDetectorCondition.class)
public KafkaStreamsFunctionProcessor kafkaStreamsFunctionProcessor(BindingServiceProperties bindingServiceProperties,
        KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
        KeyValueSerdeResolver keyValueSerdeResolver,
        KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
        KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
        ObjectProvider<CleanupConfig> cleanupConfig,
        StreamFunctionProperties streamFunctionProperties,
        KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
        ObjectProvider<StreamsBuilderFactoryBeanCustomizer> customizerProvider) {
    // getIfUnique(): the optional beans are only applied when exactly one exists.
    return new KafkaStreamsFunctionProcessor(bindingServiceProperties, kafkaStreamsExtendedBindingProperties,
            keyValueSerdeResolver, kafkaStreamsBindingInformationCatalogue, kafkaStreamsMessageConversionDelegate,
            cleanupConfig.getIfUnique(), streamFunctionProperties, kafkaStreamsBinderConfigurationProperties,
            customizerProvider.getIfUnique());
}
|
||||
|
||||
/**
 * Registers the advice handler applied while binding for encoding/decoding concerns.
 */
@Bean
public EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler() {
    return new EncodingDecodingBindAdviceHandler();
}
|
||||
|
||||
/**
 * Registers {@link KafkaStreamsBinderMetrics} in a single-binder setup: only
 * when no {@code outerContext} bean exists, micrometer is on the classpath and
 * a {@link MeterRegistry} bean is available.
 */
@Configuration
@ConditionalOnMissingBean(value = KafkaStreamsBinderMetrics.class, name = "outerContext")
@ConditionalOnClass(name = "io.micrometer.core.instrument.MeterRegistry")
protected class KafkaStreamsBinderMetricsConfiguration {

    @Bean
    @ConditionalOnBean(MeterRegistry.class)
    @ConditionalOnMissingBean(KafkaStreamsBinderMetrics.class)
    public KafkaStreamsBinderMetrics kafkaStreamsBinderMetrics(MeterRegistry meterRegistry) {

        return new KafkaStreamsBinderMetrics(meterRegistry);
    }
}
|
||||
|
||||
/**
 * Registers {@link KafkaStreamsBinderMetrics} in a multi-binder setup: the
 * {@link MeterRegistry} is looked up from the parent application context
 * exposed as the {@code outerContext} bean.
 */
@Configuration
@ConditionalOnBean(name = "outerContext")
@ConditionalOnMissingBean(KafkaStreamsBinderMetrics.class)
@ConditionalOnClass(name = "io.micrometer.core.instrument.MeterRegistry")
protected class KafkaStreamsBinderMetricsConfigurationWithMultiBinder {

    @Bean
    public KafkaStreamsBinderMetrics kafkaStreamsBinderMetrics(ConfigurableApplicationContext context) {

        // Resolve the registry from the outer (parent) context, not the binder child context.
        MeterRegistry meterRegistry = context.getBean("outerContext", ApplicationContext.class)
                .getBean(MeterRegistry.class);
        return new KafkaStreamsBinderMetrics(meterRegistry);
    }
}
|
||||
}
|
||||
@@ -1,166 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.function.BiFunction;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.serialization.ByteArraySerializer;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.utils.DlqPartitionFunction;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.core.MethodParameter;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.core.ProducerFactory;
|
||||
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Common methods used by various Kafka Streams types across the binders.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
*/
|
||||
final class KafkaStreamsBinderUtils {
|
||||
|
||||
private static final Log LOGGER = LogFactory.getLog(KafkaStreamsBinderUtils.class);
|
||||
|
||||
private KafkaStreamsBinderUtils() {
|
||||
|
||||
}
|
||||
|
||||
static void prepareConsumerBinding(String name, String group,
|
||||
ApplicationContext context, KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
|
||||
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties =
|
||||
(ExtendedConsumerProperties) properties;
|
||||
if (binderConfigurationProperties
|
||||
.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.sendToDlq) {
|
||||
extendedConsumerProperties.getExtension().setEnableDlq(true);
|
||||
}
|
||||
|
||||
String[] inputTopics = StringUtils.commaDelimitedListToStringArray(name);
|
||||
for (String inputTopic : inputTopics) {
|
||||
kafkaTopicProvisioner.provisionConsumerDestination(inputTopic, group,
|
||||
extendedConsumerProperties);
|
||||
}
|
||||
|
||||
if (extendedConsumerProperties.getExtension().isEnableDlq()) {
|
||||
|
||||
Map<String, DlqPartitionFunction> partitionFunctions =
|
||||
context.getBeansOfType(DlqPartitionFunction.class, false, false);
|
||||
boolean oneFunctionPresent = partitionFunctions.size() == 1;
|
||||
Integer dlqPartitions = extendedConsumerProperties.getExtension().getDlqPartitions();
|
||||
DlqPartitionFunction partitionFunction = oneFunctionPresent
|
||||
? partitionFunctions.values().iterator().next()
|
||||
: DlqPartitionFunction.determineFallbackFunction(dlqPartitions, LOGGER);
|
||||
|
||||
ProducerFactory<byte[], byte[]> producerFactory = getProducerFactory(
|
||||
new ExtendedProducerProperties<>(
|
||||
extendedConsumerProperties.getExtension().getDlqProducerProperties()),
|
||||
binderConfigurationProperties);
|
||||
KafkaTemplate<byte[], byte[]> kafkaTemplate = new KafkaTemplate<>(producerFactory);
|
||||
|
||||
|
||||
BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> destinationResolver =
|
||||
(cr, e) -> new TopicPartition(extendedConsumerProperties.getExtension().getDlqName(),
|
||||
partitionFunction.apply(group, cr, e));
|
||||
DeadLetterPublishingRecoverer kafkaStreamsBinderDlqRecoverer = !StringUtils
|
||||
.isEmpty(extendedConsumerProperties.getExtension().getDlqName())
|
||||
? new DeadLetterPublishingRecoverer(kafkaTemplate, destinationResolver)
|
||||
: null;
|
||||
for (String inputTopic : inputTopics) {
|
||||
if (StringUtils.isEmpty(
|
||||
extendedConsumerProperties.getExtension().getDlqName())) {
|
||||
destinationResolver = (cr, e) -> new TopicPartition("error." + inputTopic + "." + group,
|
||||
partitionFunction.apply(group, cr, e));
|
||||
kafkaStreamsBinderDlqRecoverer = new DeadLetterPublishingRecoverer(kafkaTemplate,
|
||||
destinationResolver);
|
||||
}
|
||||
|
||||
SendToDlqAndContinue sendToDlqAndContinue = context
|
||||
.getBean(SendToDlqAndContinue.class);
|
||||
sendToDlqAndContinue.addKStreamDlqDispatch(inputTopic,
|
||||
kafkaStreamsBinderDlqRecoverer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
|
||||
KafkaBinderConfigurationProperties configurationProperties) {
|
||||
Map<String, Object> props = new HashMap<>();
|
||||
props.put(ProducerConfig.RETRIES_CONFIG, 0);
|
||||
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
|
||||
props.put(ProducerConfig.ACKS_CONFIG, configurationProperties.getRequiredAcks());
|
||||
Map<String, Object> mergedConfig = configurationProperties
|
||||
.mergedProducerConfiguration();
|
||||
if (!ObjectUtils.isEmpty(mergedConfig)) {
|
||||
props.putAll(mergedConfig);
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
|
||||
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
|
||||
configurationProperties.getKafkaConnectionString());
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.BATCH_SIZE_CONFIG))) {
|
||||
props.put(ProducerConfig.BATCH_SIZE_CONFIG,
|
||||
String.valueOf(producerProperties.getExtension().getBufferSize()));
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.LINGER_MS_CONFIG))) {
|
||||
props.put(ProducerConfig.LINGER_MS_CONFIG,
|
||||
String.valueOf(producerProperties.getExtension().getBatchTimeout()));
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.COMPRESSION_TYPE_CONFIG))) {
|
||||
props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,
|
||||
producerProperties.getExtension().getCompressionType().toString());
|
||||
}
|
||||
if (!ObjectUtils.isEmpty(producerProperties.getExtension().getConfiguration())) {
|
||||
props.putAll(producerProperties.getExtension().getConfiguration());
|
||||
}
|
||||
// Always send as byte[] on dlq (the same byte[] that the consumer received)
|
||||
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
|
||||
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
|
||||
ByteArraySerializer.class);
|
||||
|
||||
return new DefaultKafkaProducerFactory<>(props);
|
||||
}
|
||||
|
||||
|
||||
static boolean supportsKStream(MethodParameter methodParameter, Class<?> targetBeanClass) {
|
||||
return KStream.class.isAssignableFrom(targetBeanClass)
|
||||
&& KStream.class.isAssignableFrom(methodParameter.getParameterType());
|
||||
}
|
||||
}
|
||||
@@ -1,164 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
|
||||
/**
|
||||
* A catalogue that provides binding information for Kafka Streams target types such as
|
||||
* KStream. It also keeps a catalogue for the underlying {@link StreamsBuilderFactoryBean}
|
||||
* and {@link StreamsConfig} associated with various
|
||||
* {@link org.springframework.cloud.stream.annotation.StreamListener} methods in the
|
||||
* {@link org.springframework.context.ApplicationContext}.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KafkaStreamsBindingInformationCatalogue {
|
||||
|
||||
private final Map<KStream<?, ?>, BindingProperties> bindingProperties = new ConcurrentHashMap<>();
|
||||
|
||||
private final Map<KStream<?, ?>, KafkaStreamsConsumerProperties> consumerProperties = new ConcurrentHashMap<>();
|
||||
|
||||
private final Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = new HashSet<>();
|
||||
|
||||
private ResolvableType outboundKStreamResolvable;
|
||||
|
||||
private final Map<KStream<?, ?>, Serde<?>> keySerdeInfo = new HashMap<>();
|
||||
|
||||
/**
|
||||
* For a given bounded {@link KStream}, retrieve it's corresponding destination on the
|
||||
* broker.
|
||||
* @param bindingTarget binding target for KStream
|
||||
* @return destination topic on Kafka
|
||||
*/
|
||||
String getDestination(KStream<?, ?> bindingTarget) {
|
||||
BindingProperties bindingProperties = this.bindingProperties.get(bindingTarget);
|
||||
return bindingProperties.getDestination();
|
||||
}
|
||||
|
||||
/**
|
||||
* Is native decoding is enabled on this {@link KStream}.
|
||||
* @param bindingTarget binding target for KStream
|
||||
* @return true if native decoding is enabled, fasle otherwise.
|
||||
*/
|
||||
boolean isUseNativeDecoding(KStream<?, ?> bindingTarget) {
|
||||
BindingProperties bindingProperties = this.bindingProperties.get(bindingTarget);
|
||||
if (bindingProperties.getConsumer() == null) {
|
||||
bindingProperties.setConsumer(new ConsumerProperties());
|
||||
}
|
||||
return bindingProperties.getConsumer().isUseNativeDecoding();
|
||||
}
|
||||
|
||||
/**
|
||||
* Is DLQ enabled for this {@link KStream}.
|
||||
* @param bindingTarget binding target for KStream
|
||||
* @return true if DLQ is enabled, false otherwise.
|
||||
*/
|
||||
boolean isDlqEnabled(KStream<?, ?> bindingTarget) {
|
||||
return this.consumerProperties.get(bindingTarget).isEnableDlq();
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the content type associated with a given {@link KStream}.
|
||||
* @param bindingTarget binding target for KStream
|
||||
* @return content Type associated.
|
||||
*/
|
||||
String getContentType(KStream<?, ?> bindingTarget) {
|
||||
BindingProperties bindingProperties = this.bindingProperties.get(bindingTarget);
|
||||
return bindingProperties.getContentType();
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a cache for bounded KStream -> {@link BindingProperties}.
|
||||
* @param bindingTarget binding target for KStream
|
||||
* @param bindingProperties {@link BindingProperties} for this KStream
|
||||
*/
|
||||
void registerBindingProperties(KStream<?, ?> bindingTarget,
|
||||
BindingProperties bindingProperties) {
|
||||
this.bindingProperties.put(bindingTarget, bindingProperties);
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a cache for bounded KStream -> {@link KafkaStreamsConsumerProperties}.
|
||||
* @param bindingTarget binding target for KStream
|
||||
* @param kafkaStreamsConsumerProperties consumer properties for this KStream
|
||||
*/
|
||||
void registerConsumerProperties(KStream<?, ?> bindingTarget,
|
||||
KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties) {
|
||||
this.consumerProperties.put(bindingTarget, kafkaStreamsConsumerProperties);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a mapping for KStream -> {@link StreamsBuilderFactoryBean}.
|
||||
* @param streamsBuilderFactoryBean provides the {@link StreamsBuilderFactoryBean}
|
||||
* mapped to the KStream
|
||||
*/
|
||||
void addStreamBuilderFactory(StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
|
||||
this.streamsBuilderFactoryBeans.add(streamsBuilderFactoryBean);
|
||||
}
|
||||
|
||||
Set<StreamsBuilderFactoryBean> getStreamsBuilderFactoryBeans() {
|
||||
return this.streamsBuilderFactoryBeans;
|
||||
}
|
||||
|
||||
void setOutboundKStreamResolvable(ResolvableType outboundResolvable) {
|
||||
this.outboundKStreamResolvable = outboundResolvable;
|
||||
}
|
||||
|
||||
ResolvableType getOutboundKStreamResolvable() {
|
||||
return outboundKStreamResolvable;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adding a mapping for KStream target to its corresponding KeySerde.
|
||||
* This is used for sending to DLQ when deserialization fails. See {@link KafkaStreamsMessageConversionDelegate}
|
||||
* for details.
|
||||
*
|
||||
* @param kStreamTarget target KStream
|
||||
* @param keySerde Serde used for the key
|
||||
*/
|
||||
void addKeySerde(KStream<?, ?> kStreamTarget, Serde<?> keySerde) {
|
||||
this.keySerdeInfo.put(kStreamTarget, keySerde);
|
||||
}
|
||||
|
||||
Serde<?> getKeySerde(KStream<?, ?> kStreamTarget) {
|
||||
return this.keySerdeInfo.get(kStreamTarget);
|
||||
}
|
||||
|
||||
|
||||
Map<KStream<?, ?>, BindingProperties> getBindingProperties() {
|
||||
return bindingProperties;
|
||||
}
|
||||
|
||||
Map<KStream<?, ?>, KafkaStreamsConsumerProperties> getConsumerProperties() {
|
||||
return consumerProperties;
|
||||
}
|
||||
}
|
||||
@@ -1,369 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.StreamsBuilder;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.Topology;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.BeanFactory;
|
||||
import org.springframework.beans.factory.BeanFactoryAware;
|
||||
import org.springframework.beans.factory.BeanInitializationException;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
|
||||
import org.springframework.beans.factory.support.RootBeanDefinition;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.function.KafkaStreamsBindableProxyFactory;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerErrorMessages;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.cloud.stream.function.FunctionConstants;
|
||||
import org.springframework.cloud.stream.function.StreamFunctionProperties;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBeanCustomizer;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
* @since 2.2.0
|
||||
*/
|
||||
public class KafkaStreamsFunctionProcessor extends AbstractKafkaStreamsBinderProcessor implements BeanFactoryAware {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(KafkaStreamsFunctionProcessor.class);
|
||||
private static final String OUTBOUND = "outbound";
|
||||
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
private final Map<String, StreamsBuilderFactoryBean> methodStreamsBuilderFactoryBeanMap = new HashMap<>();
|
||||
private final KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties;
|
||||
private final KeyValueSerdeResolver keyValueSerdeResolver;
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
private final KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate;
|
||||
|
||||
private BeanFactory beanFactory;
|
||||
private StreamFunctionProperties streamFunctionProperties;
|
||||
private KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties;
|
||||
StreamsBuilderFactoryBeanCustomizer customizer;
|
||||
|
||||
public KafkaStreamsFunctionProcessor(BindingServiceProperties bindingServiceProperties,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
|
||||
CleanupConfig cleanupConfig,
|
||||
StreamFunctionProperties streamFunctionProperties,
|
||||
KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
|
||||
StreamsBuilderFactoryBeanCustomizer customizer) {
|
||||
super(bindingServiceProperties, kafkaStreamsBindingInformationCatalogue, kafkaStreamsExtendedBindingProperties,
|
||||
keyValueSerdeResolver, cleanupConfig);
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
|
||||
this.keyValueSerdeResolver = keyValueSerdeResolver;
|
||||
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
this.kafkaStreamsMessageConversionDelegate = kafkaStreamsMessageConversionDelegate;
|
||||
this.streamFunctionProperties = streamFunctionProperties;
|
||||
this.kafkaStreamsBinderConfigurationProperties = kafkaStreamsBinderConfigurationProperties;
|
||||
this.customizer = customizer;
|
||||
}
|
||||
|
||||
private Map<String, ResolvableType> buildTypeMap(ResolvableType resolvableType,
|
||||
KafkaStreamsBindableProxyFactory kafkaStreamsBindableProxyFactory) {
|
||||
Map<String, ResolvableType> resolvableTypeMap = new LinkedHashMap<>();
|
||||
if (resolvableType != null && resolvableType.getRawClass() != null) {
|
||||
int inputCount = 1;
|
||||
|
||||
ResolvableType currentOutputGeneric;
|
||||
if (resolvableType.getRawClass().isAssignableFrom(BiFunction.class) ||
|
||||
resolvableType.getRawClass().isAssignableFrom(BiConsumer.class)) {
|
||||
inputCount = 2;
|
||||
currentOutputGeneric = resolvableType.getGeneric(2);
|
||||
}
|
||||
else {
|
||||
currentOutputGeneric = resolvableType.getGeneric(1);
|
||||
}
|
||||
while (currentOutputGeneric.getRawClass() != null && functionOrConsumerFound(currentOutputGeneric)) {
|
||||
inputCount++;
|
||||
currentOutputGeneric = currentOutputGeneric.getGeneric(1);
|
||||
}
|
||||
final Set<String> inputs = new LinkedHashSet<>(kafkaStreamsBindableProxyFactory.getInputs());
|
||||
|
||||
final Iterator<String> iterator = inputs.iterator();
|
||||
|
||||
popuateResolvableTypeMap(resolvableType, resolvableTypeMap, iterator);
|
||||
|
||||
ResolvableType iterableResType = resolvableType;
|
||||
int i = resolvableType.getRawClass().isAssignableFrom(BiFunction.class) ||
|
||||
resolvableType.getRawClass().isAssignableFrom(BiConsumer.class) ? 2 : 1;
|
||||
ResolvableType outboundResolvableType;
|
||||
if (i == inputCount) {
|
||||
outboundResolvableType = iterableResType.getGeneric(i);
|
||||
}
|
||||
else {
|
||||
while (i < inputCount && iterator.hasNext()) {
|
||||
iterableResType = iterableResType.getGeneric(1);
|
||||
if (iterableResType.getRawClass() != null &&
|
||||
functionOrConsumerFound(iterableResType)) {
|
||||
popuateResolvableTypeMap(iterableResType, resolvableTypeMap, iterator);
|
||||
}
|
||||
i++;
|
||||
}
|
||||
outboundResolvableType = iterableResType.getGeneric(1);
|
||||
}
|
||||
resolvableTypeMap.put(OUTBOUND, outboundResolvableType);
|
||||
}
|
||||
return resolvableTypeMap;
|
||||
}
|
||||
|
||||
private boolean functionOrConsumerFound(ResolvableType iterableResType) {
|
||||
return iterableResType.getRawClass().equals(Function.class) ||
|
||||
iterableResType.getRawClass().equals(Consumer.class);
|
||||
}
|
||||
|
||||
private void popuateResolvableTypeMap(ResolvableType resolvableType, Map<String, ResolvableType> resolvableTypeMap,
|
||||
Iterator<String> iterator) {
|
||||
final String next = iterator.next();
|
||||
resolvableTypeMap.put(next, resolvableType.getGeneric(0));
|
||||
if (resolvableType.getRawClass() != null &&
|
||||
(resolvableType.getRawClass().isAssignableFrom(BiFunction.class) ||
|
||||
resolvableType.getRawClass().isAssignableFrom(BiConsumer.class))
|
||||
&& iterator.hasNext()) {
|
||||
resolvableTypeMap.put(iterator.next(), resolvableType.getGeneric(1));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This method must be kept stateless. In the case of multiple function beans in an application,
|
||||
* isolated {@link KafkaStreamsBindableProxyFactory} instances are passed in separately for those functions. If the
|
||||
* state is shared between invocations, that will create potential race conditions. Hence, invocations of this method
|
||||
* should not be dependent on state modified by a previous invocation.
|
||||
*
|
||||
* @param resolvableType type of the binding
|
||||
* @param functionName bean name of the function
|
||||
* @param kafkaStreamsBindableProxyFactory bindable proxy factory for the Kafka Streams type
|
||||
*/
|
||||
@SuppressWarnings({ "unchecked", "rawtypes" })
|
||||
public void setupFunctionInvokerForKafkaStreams(ResolvableType resolvableType, String functionName,
|
||||
KafkaStreamsBindableProxyFactory kafkaStreamsBindableProxyFactory) {
|
||||
final Map<String, ResolvableType> stringResolvableTypeMap = buildTypeMap(resolvableType, kafkaStreamsBindableProxyFactory);
|
||||
ResolvableType outboundResolvableType = stringResolvableTypeMap.remove(OUTBOUND);
|
||||
Object[] adaptedInboundArguments = adaptAndRetrieveInboundArguments(stringResolvableTypeMap, functionName);
|
||||
try {
|
||||
if (resolvableType.getRawClass() != null && resolvableType.getRawClass().equals(Consumer.class)) {
|
||||
Consumer<Object> consumer = (Consumer) this.beanFactory.getBean(functionName);
|
||||
consumer.accept(adaptedInboundArguments[0]);
|
||||
}
|
||||
else if (resolvableType.getRawClass() != null && resolvableType.getRawClass().equals(BiConsumer.class)) {
|
||||
BiConsumer<Object, Object> biConsumer = (BiConsumer) this.beanFactory.getBean(functionName);
|
||||
biConsumer.accept(adaptedInboundArguments[0], adaptedInboundArguments[1]);
|
||||
}
|
||||
else {
|
||||
Object result;
|
||||
if (resolvableType.getRawClass() != null && resolvableType.getRawClass().equals(BiFunction.class)) {
|
||||
BiFunction<Object, Object, Object> biFunction = (BiFunction) beanFactory.getBean(functionName);
|
||||
result = biFunction.apply(adaptedInboundArguments[0], adaptedInboundArguments[1]);
|
||||
}
|
||||
else {
|
||||
Function<Object, Object> function = (Function) beanFactory.getBean(functionName);
|
||||
result = function.apply(adaptedInboundArguments[0]);
|
||||
}
|
||||
int i = 1;
|
||||
while (result instanceof Function || result instanceof Consumer) {
|
||||
if (result instanceof Function) {
|
||||
result = ((Function) result).apply(adaptedInboundArguments[i]);
|
||||
}
|
||||
else {
|
||||
((Consumer) result).accept(adaptedInboundArguments[i]);
|
||||
result = null;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
if (result != null) {
|
||||
kafkaStreamsBindingInformationCatalogue.setOutboundKStreamResolvable(
|
||||
outboundResolvableType != null ? outboundResolvableType : resolvableType.getGeneric(1));
|
||||
final Set<String> outputs = new TreeSet<>(kafkaStreamsBindableProxyFactory.getOutputs());
|
||||
final Iterator<String> outboundDefinitionIterator = outputs.iterator();
|
||||
|
||||
if (result.getClass().isArray()) {
|
||||
// Binding target as the output bindings were deferred in the KafkaStreamsBindableProxyFactory
|
||||
// due to the fact that it didn't know the returned array size. At this point in the execution,
|
||||
// we know exactly the number of outbound components (from the array length), so do the binding.
|
||||
final int length = ((Object[]) result).length;
|
||||
|
||||
List<String> outputBindings = getOutputBindings(functionName, length);
|
||||
Iterator<String> iterator = outputBindings.iterator();
|
||||
BeanDefinitionRegistry registry = (BeanDefinitionRegistry) beanFactory;
|
||||
Object[] outboundKStreams = (Object[]) result;
|
||||
|
||||
for (int ij = 0; ij < length; ij++) {
|
||||
|
||||
String next = iterator.next();
|
||||
kafkaStreamsBindableProxyFactory.addOutputBinding(next, KStream.class);
|
||||
RootBeanDefinition rootBeanDefinition1 = new RootBeanDefinition();
|
||||
rootBeanDefinition1.setInstanceSupplier(() -> kafkaStreamsBindableProxyFactory.getOutputHolders().get(next).getBoundTarget());
|
||||
registry.registerBeanDefinition(next, rootBeanDefinition1);
|
||||
|
||||
Object targetBean = this.applicationContext.getBean(next);
|
||||
|
||||
KStreamBoundElementFactory.KStreamWrapper
|
||||
boundElement = (KStreamBoundElementFactory.KStreamWrapper) targetBean;
|
||||
boundElement.wrap((KStream) outboundKStreams[ij]);
|
||||
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (outboundDefinitionIterator.hasNext()) {
|
||||
final String next = outboundDefinitionIterator.next();
|
||||
Object targetBean = this.applicationContext.getBean(next);
|
||||
KStreamBoundElementFactory.KStreamWrapper
|
||||
boundElement = (KStreamBoundElementFactory.KStreamWrapper) targetBean;
|
||||
boundElement.wrap((KStream) result);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception ex) {
|
||||
throw new BeanInitializationException("Cannot setup function invoker for this Kafka Streams function.", ex);
|
||||
}
|
||||
}
|
||||
|
||||
private List<String> getOutputBindings(String functionName, int outputs) {
|
||||
List<String> outputBindings = this.streamFunctionProperties.getOutputBindings(functionName);
|
||||
List<String> outputBindingNames = new ArrayList<>();
|
||||
if (!CollectionUtils.isEmpty(outputBindings)) {
|
||||
outputBindingNames.addAll(outputBindings);
|
||||
return outputBindingNames;
|
||||
}
|
||||
else {
|
||||
for (int i = 0; i < outputs; i++) {
|
||||
outputBindingNames.add(String.format("%s-%s-%d", functionName, FunctionConstants.DEFAULT_OUTPUT_SUFFIX, i));
|
||||
}
|
||||
}
|
||||
return outputBindingNames;
|
||||
|
||||
}
|
||||
|
||||
@SuppressWarnings({"unchecked"})
|
||||
private Object[] adaptAndRetrieveInboundArguments(Map<String, ResolvableType> stringResolvableTypeMap,
|
||||
String functionName) {
|
||||
Object[] arguments = new Object[stringResolvableTypeMap.size()];
|
||||
int i = 0;
|
||||
for (String input : stringResolvableTypeMap.keySet()) {
|
||||
Class<?> parameterType = stringResolvableTypeMap.get(input).getRawClass();
|
||||
|
||||
if (input != null) {
|
||||
Object targetBean = applicationContext.getBean(input);
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties.getBindingProperties(input);
|
||||
//Retrieve the StreamsConfig created for this method if available.
|
||||
//Otherwise, create the StreamsBuilderFactory and get the underlying config.
|
||||
if (!this.methodStreamsBuilderFactoryBeanMap.containsKey(functionName)) {
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean = buildStreamsBuilderAndRetrieveConfig(functionName, applicationContext,
|
||||
input, kafkaStreamsBinderConfigurationProperties, customizer);
|
||||
this.methodStreamsBuilderFactoryBeanMap.put(functionName, streamsBuilderFactoryBean);
|
||||
}
|
||||
try {
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean =
|
||||
this.methodStreamsBuilderFactoryBeanMap.get(functionName);
|
||||
StreamsBuilder streamsBuilder = streamsBuilderFactoryBean.getObject();
|
||||
final String applicationId = streamsBuilderFactoryBean.getStreamsConfiguration().getProperty(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
KafkaStreamsConsumerProperties extendedConsumerProperties =
|
||||
this.kafkaStreamsExtendedBindingProperties.getExtendedConsumerProperties(input);
|
||||
extendedConsumerProperties.setApplicationId(applicationId);
|
||||
//get state store spec
|
||||
|
||||
Serde<?> keySerde = this.keyValueSerdeResolver.getInboundKeySerde(extendedConsumerProperties, stringResolvableTypeMap.get(input));
|
||||
LOG.info("Key Serde used for " + input + ": " + keySerde.getClass().getName());
|
||||
Serde<?> valueSerde = bindingServiceProperties.getConsumerProperties(input).isUseNativeDecoding() ?
|
||||
getValueSerde(input, extendedConsumerProperties, stringResolvableTypeMap.get(input)) : Serdes.ByteArray();
|
||||
LOG.info("Value Serde used for " + input + ": " + valueSerde.getClass().getName());
|
||||
final Topology.AutoOffsetReset autoOffsetReset = getAutoOffsetReset(input, extendedConsumerProperties);
|
||||
|
||||
if (parameterType.isAssignableFrom(KStream.class)) {
|
||||
KStream<?, ?> stream = getKStream(input, bindingProperties, extendedConsumerProperties,
|
||||
streamsBuilder, keySerde, valueSerde, autoOffsetReset, i == 0);
|
||||
KStreamBoundElementFactory.KStreamWrapper kStreamWrapper =
|
||||
(KStreamBoundElementFactory.KStreamWrapper) targetBean;
|
||||
//wrap the proxy created during the initial target type binding with real object (KStream)
|
||||
kStreamWrapper.wrap((KStream<Object, Object>) stream);
|
||||
|
||||
this.kafkaStreamsBindingInformationCatalogue.addKeySerde((KStream<?, ?>) kStreamWrapper, keySerde);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactory(streamsBuilderFactoryBean);
|
||||
|
||||
if (KStream.class.isAssignableFrom(stringResolvableTypeMap.get(input).getRawClass())) {
|
||||
final Class<?> valueClass =
|
||||
(stringResolvableTypeMap.get(input).getGeneric(1).getRawClass() != null)
|
||||
? (stringResolvableTypeMap.get(input).getGeneric(1).getRawClass()) : Object.class;
|
||||
if (this.kafkaStreamsBindingInformationCatalogue.isUseNativeDecoding(
|
||||
(KStream<?, ?>) kStreamWrapper)) {
|
||||
arguments[i] = stream;
|
||||
}
|
||||
else {
|
||||
arguments[i] = this.kafkaStreamsMessageConversionDelegate.deserializeOnInbound(
|
||||
valueClass, stream);
|
||||
}
|
||||
}
|
||||
|
||||
if (arguments[i] == null) {
|
||||
arguments[i] = stream;
|
||||
}
|
||||
Assert.notNull(arguments[i], "Problems encountered while adapting the function argument.");
|
||||
}
|
||||
else {
|
||||
handleKTableGlobalKTableInputs(arguments, i, input, parameterType, targetBean, streamsBuilderFactoryBean,
|
||||
streamsBuilder, extendedConsumerProperties, keySerde, valueSerde, autoOffsetReset);
|
||||
}
|
||||
i++;
|
||||
}
|
||||
catch (Exception ex) {
|
||||
throw new IllegalStateException(ex);
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException(StreamListenerErrorMessages.INVALID_DECLARATIVE_METHOD_PARAMETERS);
|
||||
}
|
||||
}
|
||||
return arguments;
|
||||
}
|
||||
|
||||
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
	// Capture the owning BeanFactory so binding targets can be resolved
	// lazily at runtime rather than at construction time.
	this.beanFactory = beanFactory;
}
|
||||
}
|
||||
@@ -1,345 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.header.Header;
|
||||
import org.apache.kafka.common.header.Headers;
|
||||
import org.apache.kafka.common.header.internals.RecordHeader;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serializer;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.processor.Processor;
|
||||
import org.apache.kafka.streams.processor.ProcessorContext;
|
||||
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.messaging.converter.CompositeMessageConverter;
|
||||
import org.springframework.messaging.converter.MessageConverter;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Delegate for handling all framework level message conversions inbound and outbound on
|
||||
* {@link KStream}. If native encoding is not enabled, then serialization will be
|
||||
* performed on outbound messages based on a contentType. Similarly, if native decoding is
|
||||
* not enabled, deserialization will be performed on inbound messages based on a
|
||||
* contentType. Based on the contentType, a {@link MessageConverter} will be resolved.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KafkaStreamsMessageConversionDelegate {

	private static final Log LOG = LogFactory
			.getLog(KafkaStreamsMessageConversionDelegate.class);

	// Hands the converted (key, value) pair from the branch predicate (where the
	// actual conversion happens) to the downstream mapValues call. NOTE(review):
	// this relies on predicate and mapValues running for the same record on the
	// same stream thread — confirm against the Kafka Streams version in use.
	private static final ThreadLocal<KeyValue<Object, Object>> keyValueThreadLocal = new ThreadLocal<>();

	// Resolves the MessageConverter used for both inbound and outbound conversion.
	private final CompositeMessageConverter compositeMessageConverter;

	// Routes records that failed deserialization to the DLQ, when DLQ is enabled.
	private final SendToDlqAndContinue sendToDlqAndContinue;

	private final KafkaStreamsBindingInformationCatalogue kstreamBindingInformationCatalogue;

	private final KafkaStreamsBinderConfigurationProperties kstreamBinderConfigurationProperties;

	// Single-slot holder for the most recent deserialization failure, read by the
	// DLQ/error branch. NOTE(review): shared mutable state — concurrent failures on
	// multiple stream threads may overwrite each other; verify threading model.
	Exception[] failedWithDeserException = new Exception[1];

	KafkaStreamsMessageConversionDelegate(
			CompositeMessageConverter compositeMessageConverter,
			SendToDlqAndContinue sendToDlqAndContinue,
			KafkaStreamsBindingInformationCatalogue kstreamBindingInformationCatalogue,
			KafkaStreamsBinderConfigurationProperties kstreamBinderConfigurationProperties) {
		this.compositeMessageConverter = compositeMessageConverter;
		this.sendToDlqAndContinue = sendToDlqAndContinue;
		this.kstreamBindingInformationCatalogue = kstreamBindingInformationCatalogue;
		this.kstreamBinderConfigurationProperties = kstreamBinderConfigurationProperties;
	}

	/**
	 * Serialize {@link KStream} records on outbound based on contentType.
	 * Null values (tombstones) are filtered out before conversion, and the
	 * effective content type is propagated into the record headers by a
	 * terminal processor.
	 * @param outboundBindTarget outbound KStream target
	 * @return serialized KStream
	 */
	@SuppressWarnings({"rawtypes", "unchecked"})
	public KStream serializeOnOutbound(KStream<?, ?> outboundBindTarget) {
		String contentType = this.kstreamBindingInformationCatalogue
				.getContentType(outboundBindTarget);
		MessageConverter messageConverter = this.compositeMessageConverter;
		final PerRecordContentTypeHolder perRecordContentTypeHolder = new PerRecordContentTypeHolder();

		final KStream<?, ?> kStreamWithEnrichedHeaders = outboundBindTarget
				// drop tombstones: there is no payload to convert
				.filter((k, v) -> v != null)
				.mapValues((v) -> {
					// Normalize the value into a Message so the converter can be applied.
					Message<?> message = v instanceof Message<?> ? (Message<?>) v
							: MessageBuilder.withPayload(v).build();
					Map<String, Object> headers = new HashMap<>(message.getHeaders());
					if (!StringUtils.isEmpty(contentType)) {
						headers.put(MessageHeaders.CONTENT_TYPE, contentType);
					}
					MessageHeaders messageHeaders = new MessageHeaders(headers);
					final Message<?> convertedMessage = messageConverter.toMessage(message.getPayload(), messageHeaders);
					// Remember the content type so the processor below can stamp it on
					// the Kafka record headers for this same record.
					perRecordContentTypeHolder.setContentType((String) messageHeaders.get(MessageHeaders.CONTENT_TYPE));
					return convertedMessage.getPayload();
				});

		// Terminal processor that copies the resolved content type into the native
		// Kafka record headers (replacing any existing contentType header).
		kStreamWithEnrichedHeaders.process(() -> new Processor() {

			ProcessorContext context;

			@Override
			public void init(ProcessorContext context) {
				this.context = context;
			}

			@Override
			public void process(Object key, Object value) {
				if (perRecordContentTypeHolder.contentType != null) {
					this.context.headers().remove(MessageHeaders.CONTENT_TYPE);
					final Header header;
					try {
						// JSON-encode the content type string, matching how Spring
						// writes this header elsewhere.
						header = new RecordHeader(MessageHeaders.CONTENT_TYPE,
								new ObjectMapper().writeValueAsBytes(perRecordContentTypeHolder.contentType));
						this.context.headers().add(header);
					}
					catch (Exception e) {
						// Best effort: a missing header is tolerated, only log at debug.
						if (LOG.isDebugEnabled()) {
							LOG.debug("Could not add content type header");
						}
					}
					perRecordContentTypeHolder.unsetContentType();
				}
			}

			@Override
			public void close() {

			}
		});

		return kStreamWithEnrichedHeaders;
	}

	/**
	 * Deserialize incoming {@link KStream} based on content type.
	 * Conversion happens inside the first branch predicate; successfully converted
	 * records flow to branch[0], failures and tombstones to branch[1] which is
	 * handled by {@link #processErrorFromDeserialization}.
	 * @param valueClass on KStream value
	 * @param bindingTarget inbound KStream target
	 * @return deserialized KStream
	 */
	@SuppressWarnings({ "unchecked", "rawtypes" })
	public KStream deserializeOnInbound(Class<?> valueClass,
			KStream<?, ?> bindingTarget) {
		MessageConverter messageConverter = this.compositeMessageConverter;
		final PerRecordContentTypeHolder perRecordContentTypeHolder = new PerRecordContentTypeHolder();

		// Capture any per-record contentType header before conversion runs.
		resolvePerRecordContentType(bindingTarget, perRecordContentTypeHolder);

		// Deserialize using a branching strategy
		KStream<?, ?>[] branch = bindingTarget.branch(
				// First filter where the message is converted and return true if
				// everything went well, return false otherwise.
				(o, o2) -> {
					boolean isValidRecord = false;

					try {
						// if the record is a tombstone, ignore and exit from processing
						// further.
						if (o2 != null) {
							if (o2 instanceof Message || o2 instanceof String
									|| o2 instanceof byte[]) {
								Message<?> m1 = null;
								// Apply the per-record content type (if one was seen on
								// the record headers) before converting.
								if (o2 instanceof Message) {
									m1 = perRecordContentTypeHolder.contentType != null
											? MessageBuilder.fromMessage((Message<?>) o2)
													.setHeader(
															MessageHeaders.CONTENT_TYPE,
															perRecordContentTypeHolder.contentType)
													.build()
											: (Message<?>) o2;
								}
								else {
									m1 = perRecordContentTypeHolder.contentType != null
											? MessageBuilder.withPayload(o2).setHeader(
													MessageHeaders.CONTENT_TYPE,
													perRecordContentTypeHolder.contentType)
													.build()
											: MessageBuilder.withPayload(o2).build();
								}
								convertAndSetMessage(o, valueClass, messageConverter, m1);
							}
							else {
								// Value is already a domain object; pass it through untouched.
								keyValueThreadLocal.set(new KeyValue<>(o, o2));
							}
							isValidRecord = true;
						}
						else {
							LOG.info(
									"Received a tombstone record. This will be skipped from further processing.");
						}
					}
					catch (Exception e) {
						LOG.warn(
								"Deserialization has failed. This will be skipped from further processing.",
								e);
						// pass through
						failedWithDeserException[0] = e;
					}
					return isValidRecord;
				},
				// second filter that catches any messages for which an exception thrown
				// in the first filter above.
				(k, v) -> true);
		// process errors from the second filter in the branch above.
		processErrorFromDeserialization(bindingTarget, branch[1], failedWithDeserException);

		// first branch above is the branch where the messages are converted, let it go
		// through further processing.
		return branch[0].mapValues((o2) -> {
			// Retrieve the converted value placed on the ThreadLocal by the predicate
			// above, then clear the slot for the next record.
			Object objectValue = keyValueThreadLocal.get().value;
			keyValueThreadLocal.remove();
			return objectValue;
		});
	}

	// Installs a side-effect-only processor that, for each record, copies any
	// contentType record header into the holder (stripping surrounding quotes).
	@SuppressWarnings({ "unchecked", "rawtypes" })
	private void resolvePerRecordContentType(KStream<?, ?> outboundBindTarget,
			PerRecordContentTypeHolder perRecordContentTypeHolder) {
		outboundBindTarget.process(() -> new Processor() {

			ProcessorContext context;

			@Override
			public void init(ProcessorContext context) {
				this.context = context;
			}

			@Override
			public void process(Object key, Object value) {
				final Headers headers = this.context.headers();
				final Iterable<Header> contentTypes = headers
						.headers(MessageHeaders.CONTENT_TYPE);
				if (contentTypes != null && contentTypes.iterator().hasNext()) {
					final String contentType = new String(
							contentTypes.iterator().next().value());
					// remove leading and trailing quotes
					final String cleanContentType = StringUtils.replace(contentType, "\"",
							"");
					perRecordContentTypeHolder.setContentType(cleanContentType);
				}
			}

			@Override
			public void close() {

			}
		});
	}

	// Converts the message payload to valueClass (unless it already is one) and
	// stores the resulting (key, value) pair on the ThreadLocal for the caller.
	private void convertAndSetMessage(Object o, Class<?> valueClass,
			MessageConverter messageConverter, Message<?> msg) {
		Object result = valueClass.isAssignableFrom(msg.getPayload().getClass())
				? msg.getPayload() : messageConverter.fromMessage(msg, valueClass);

		Assert.notNull(result, "Failed to convert message " + msg);

		keyValueThreadLocal.set(new KeyValue<>(o, result));
	}

	// Handles the error branch produced by deserializeOnInbound: depending on
	// configuration, records are sent to the DLQ, cause processing to stop
	// (logAndFail), or are logged and skipped (logAndContinue).
	@SuppressWarnings({ "unchecked", "rawtypes" })
	private void processErrorFromDeserialization(KStream<?, ?> bindingTarget,
			KStream<?, ?> branch, Exception[] exception) {
		branch.process(() -> new Processor() {
			ProcessorContext context;

			@Override
			public void init(ProcessorContext context) {
				this.context = context;
			}

			@Override
			public void process(Object o, Object o2) {
				// Only continue if the record was not a tombstone.
				if (o2 != null) {
					if (KafkaStreamsMessageConversionDelegate.this.kstreamBindingInformationCatalogue
							.isDlqEnabled(bindingTarget)) {
						if (o2 instanceof Message) {
							Message message = (Message) o2;

							// We need to convert the key to a byte[] before sending to DLQ.
							Serde keySerde = kstreamBindingInformationCatalogue.getKeySerde(bindingTarget);
							Serializer keySerializer = keySerde.serializer();
							byte[] keyBytes = keySerializer.serialize(null, o);

							ConsumerRecord consumerRecord = new ConsumerRecord(this.context.topic(), this.context.partition(), this.context.offset(),
									keyBytes, message.getPayload());

							KafkaStreamsMessageConversionDelegate.this.sendToDlqAndContinue
									.sendToDlq(consumerRecord, exception[0]);
						}
						else {
							// Raw (non-Message) value: forward key/value as-is to the DLQ.
							ConsumerRecord consumerRecord = new ConsumerRecord(this.context.topic(), this.context.partition(), this.context.offset(),
									o, o2);
							KafkaStreamsMessageConversionDelegate.this.sendToDlqAndContinue
									.sendToDlq(consumerRecord, exception[0]);
						}
					}
					else if (KafkaStreamsMessageConversionDelegate.this.kstreamBinderConfigurationProperties
							.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndFail) {
						throw new IllegalStateException("Inbound deserialization failed. "
								+ "Stopping further processing of records.");
					}
					else if (KafkaStreamsMessageConversionDelegate.this.kstreamBinderConfigurationProperties
							.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndContinue) {
						// quietly passing through. No action needed, this is similar to
						// log and continue.
						LOG.error(
								"Inbound deserialization failed. Skipping this record and continuing.");
					}
				}
			}

			@Override
			public void close() {

			}
		});
	}

	// Mutable holder carrying the content type of the record currently being
	// processed between the topology stages built above.
	private static class PerRecordContentTypeHolder {

		String contentType;

		void setContentType(String contentType) {
			this.contentType = contentType;
		}

		void unsetContentType() {
			this.contentType = null;
		}

	}

}
|
||||
@@ -1,63 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
|
||||
/**
|
||||
 * An internal registry for holding {@link KafkaStreams} objects maintained through
|
||||
* {@link StreamsBuilderFactoryManager}.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KafkaStreamsRegistry {
|
||||
|
||||
private Map<KafkaStreams, StreamsBuilderFactoryBean> streamsBuilderFactoryBeanMap = new HashMap<>();
|
||||
|
||||
private final Set<KafkaStreams> kafkaStreams = new HashSet<>();
|
||||
|
||||
Set<KafkaStreams> getKafkaStreams() {
|
||||
return this.kafkaStreams;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register the {@link KafkaStreams} object created in the application.
|
||||
* @param streamsBuilderFactoryBean {@link StreamsBuilderFactoryBean}
|
||||
*/
|
||||
void registerKafkaStreams(StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
|
||||
final KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
|
||||
this.kafkaStreams.add(kafkaStreams);
|
||||
this.streamsBuilderFactoryBeanMap.put(kafkaStreams, streamsBuilderFactoryBean);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param kafkaStreams {@link KafkaStreams} object
|
||||
* @return Corresponding {@link StreamsBuilderFactoryBean}.
|
||||
*/
|
||||
StreamsBuilderFactoryBean streamBuilderFactoryBean(KafkaStreams kafkaStreams) {
|
||||
return this.streamsBuilderFactoryBeanMap.get(kafkaStreams);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,484 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.StreamsBuilder;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.Topology;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.apache.kafka.streams.state.StoreBuilder;
|
||||
import org.apache.kafka.streams.state.Stores;
|
||||
|
||||
import org.springframework.beans.factory.BeanInitializationException;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsStateStore;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsStateStoreProperties;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerErrorMessages;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerParameterAdapter;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerSetupMethodOrchestrator;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.core.MethodParameter;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.core.annotation.AnnotationUtils;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBeanCustomizer;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.messaging.handler.annotation.SendTo;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.ReflectionUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Kafka Streams specific implementation for {@link StreamListenerSetupMethodOrchestrator}
|
||||
* that overrides the default mechanisms for invoking StreamListener adapters.
|
||||
* <p>
|
||||
* The orchestration primarily focus on the following areas:
|
||||
* <p>
|
||||
* 1. Allow multiple KStream output bindings (KStream branching) by allowing more than one
|
||||
* output values on {@link SendTo} 2. Allow multiple inbound bindings for multiple KStream
|
||||
* and or KTable/GlobalKTable types. 3. Each StreamListener method that it orchestrates
|
||||
* gets its own {@link StreamsBuilderFactoryBean} and {@link StreamsConfig}
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Lei Chen
|
||||
* @author Gary Russell
|
||||
*/
|
||||
class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStreamsBinderProcessor
|
||||
implements StreamListenerSetupMethodOrchestrator {
|
||||
|
||||
private static final Log LOG = LogFactory
|
||||
.getLog(KafkaStreamsStreamListenerSetupMethodOrchestrator.class);
|
||||
|
||||
private final StreamListenerParameterAdapter streamListenerParameterAdapter;
|
||||
|
||||
private final Collection<StreamListenerResultAdapter> streamListenerResultAdapters;
|
||||
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
|
||||
private final KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties;
|
||||
|
||||
private final KeyValueSerdeResolver keyValueSerdeResolver;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private final Map<Method, List<String>> registeredStoresPerMethod = new HashMap<>();
|
||||
|
||||
private final Map<Method, StreamsBuilderFactoryBean> methodStreamsBuilderFactoryBeanMap = new HashMap<>();
|
||||
|
||||
StreamsBuilderFactoryBeanCustomizer customizer;
|
||||
|
||||
// Wires all collaborators needed to orchestrate StreamListener methods; the
// superclass receives the subset it needs for building the streams topology.
KafkaStreamsStreamListenerSetupMethodOrchestrator(
		BindingServiceProperties bindingServiceProperties,
		KafkaStreamsExtendedBindingProperties extendedBindingProperties,
		KeyValueSerdeResolver keyValueSerdeResolver,
		KafkaStreamsBindingInformationCatalogue bindingInformationCatalogue,
		StreamListenerParameterAdapter streamListenerParameterAdapter,
		Collection<StreamListenerResultAdapter> listenerResultAdapters,
		CleanupConfig cleanupConfig,
		StreamsBuilderFactoryBeanCustomizer customizer) {
	super(bindingServiceProperties, bindingInformationCatalogue, extendedBindingProperties, keyValueSerdeResolver, cleanupConfig);
	this.bindingServiceProperties = bindingServiceProperties;
	this.kafkaStreamsExtendedBindingProperties = extendedBindingProperties;
	this.keyValueSerdeResolver = keyValueSerdeResolver;
	this.kafkaStreamsBindingInformationCatalogue = bindingInformationCatalogue;
	this.streamListenerParameterAdapter = streamListenerParameterAdapter;
	this.streamListenerResultAdapters = listenerResultAdapters;
	// Optional hook allowing users to customize each StreamsBuilderFactoryBean.
	this.customizer = customizer;
}
|
||||
|
||||
@Override
|
||||
public boolean supports(Method method) {
|
||||
return methodParameterSupports(method) && (methodReturnTypeSuppports(method)
|
||||
|| Void.TYPE.equals(method.getReturnType()));
|
||||
}
|
||||
|
||||
private boolean methodReturnTypeSuppports(Method method) {
|
||||
Class<?> returnType = method.getReturnType();
|
||||
if (returnType.equals(KStream.class) || (returnType.isArray()
|
||||
&& returnType.getComponentType().equals(KStream.class))) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private boolean methodParameterSupports(Method method) {
|
||||
boolean supports = false;
|
||||
for (int i = 0; i < method.getParameterCount(); i++) {
|
||||
MethodParameter methodParameter = MethodParameter.forExecutable(method, i);
|
||||
Class<?> parameterType = methodParameter.getParameterType();
|
||||
if (parameterType.equals(KStream.class) || parameterType.equals(KTable.class)
|
||||
|| parameterType.equals(GlobalKTable.class)) {
|
||||
supports = true;
|
||||
}
|
||||
}
|
||||
return supports;
|
||||
}
|
||||
|
||||
/**
 * Invokes the StreamListener method reflectively with adapted KStream/KTable
 * arguments and binds any returned KStream(s) to the declared outbound targets.
 * Any failure is wrapped in a {@link BeanInitializationException}.
 */
@Override
@SuppressWarnings({"rawtypes", "unchecked"})
public void orchestrateStreamListenerSetupMethod(StreamListener streamListener,
		Method method, Object bean) {
	String[] methodAnnotatedOutboundNames = getOutboundBindingTargetNames(method);
	validateStreamListenerMethod(streamListener, method,
			methodAnnotatedOutboundNames);
	String methodAnnotatedInboundName = streamListener.value();
	// Build the actual KStream/KTable/GlobalKTable arguments for the invocation.
	Object[] adaptedInboundArguments = adaptAndRetrieveInboundArguments(method,
			methodAnnotatedInboundName, this.applicationContext,
			this.streamListenerParameterAdapter);
	try {
		ReflectionUtils.makeAccessible(method);
		if (Void.TYPE.equals(method.getReturnType())) {
			method.invoke(bean, adaptedInboundArguments);
		}
		else {
			Object result = method.invoke(bean, adaptedInboundArguments);

			// Sanity check: the number of returned KStreams must match the number
			// of outbound bindings declared via @SendTo.
			if (methodAnnotatedOutboundNames != null && methodAnnotatedOutboundNames.length > 0) {
				if (result.getClass().isArray()) {
					Assert.isTrue(
							methodAnnotatedOutboundNames.length == ((Object[]) result).length,
							"Result does not match with the number of declared outbounds");
				}
				else {
					Assert.isTrue(methodAnnotatedOutboundNames.length == 1,
							"Result does not match with the number of declared outbounds");
				}
			}
			// Record the return type so outbound serde resolution can use generics.
			kafkaStreamsBindingInformationCatalogue.setOutboundKStreamResolvable(ResolvableType.forMethodReturnType(method));
			if (methodAnnotatedOutboundNames != null && methodAnnotatedOutboundNames.length > 0) {
				if (result.getClass().isArray()) {
					// Branching case: pair each returned KStream with its outbound
					// binding in declaration order.
					Object[] outboundKStreams = (Object[]) result;
					int i = 0;
					for (Object outboundKStream : outboundKStreams) {
						Object targetBean = this.applicationContext
								.getBean(methodAnnotatedOutboundNames[i++]);
						adaptStreamListenerResult(outboundKStream, targetBean);
					}
				}
				else {
					Object targetBean = this.applicationContext
							.getBean(methodAnnotatedOutboundNames[0]);
					adaptStreamListenerResult(result, targetBean);
				}
			}
		}
	}
	catch (Exception ex) {
		throw new BeanInitializationException(
				"Cannot setup StreamListener for " + method, ex);
	}
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private void adaptStreamListenerResult(Object outboundKStream, Object targetBean) {
|
||||
for (StreamListenerResultAdapter streamListenerResultAdapter : this.streamListenerResultAdapters) {
|
||||
if (streamListenerResultAdapter.supports(
|
||||
outboundKStream.getClass(), targetBean.getClass())) {
|
||||
streamListenerResultAdapter.adapt(outboundKStream,
|
||||
targetBean);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Resolves each method parameter annotated with {@code @Input} (or the single
 * implicit inbound) into the concrete KStream/KTable/GlobalKTable argument,
 * creating one StreamsBuilderFactoryBean per method on first use and wiring
 * serdes, offset reset and state stores along the way.
 * @param method the StreamListener method
 * @param inboundName default inbound binding name from the annotation value
 * @param applicationContext context used to look up binding target beans
 * @param adapters parameter adapters tried in order for KStream arguments
 * @return the fully adapted argument array for the reflective invocation
 */
@Override
@SuppressWarnings({"unchecked"})
public Object[] adaptAndRetrieveInboundArguments(Method method, String inboundName,
		ApplicationContext applicationContext,
		StreamListenerParameterAdapter... adapters) {
	Object[] arguments = new Object[method.getParameterTypes().length];
	for (int parameterIndex = 0; parameterIndex < arguments.length; parameterIndex++) {
		MethodParameter methodParameter = MethodParameter.forExecutable(method,
				parameterIndex);
		Class<?> parameterType = methodParameter.getParameterType();
		Object targetReferenceValue = null;
		if (methodParameter.hasParameterAnnotation(Input.class)) {
			// Explicit @Input on the parameter wins over the method-level name.
			targetReferenceValue = AnnotationUtils
					.getValue(methodParameter.getParameterAnnotation(Input.class));
			Input methodAnnotation = methodParameter
					.getParameterAnnotation(Input.class);
			inboundName = methodAnnotation.value();
		}
		else if (arguments.length == 1 && StringUtils.hasText(inboundName)) {
			// Single-parameter method: fall back to the @StreamListener value.
			targetReferenceValue = inboundName;
		}
		if (targetReferenceValue != null) {
			Assert.isInstanceOf(String.class, targetReferenceValue,
					"Annotation value must be a String");
			Object targetBean = applicationContext
					.getBean((String) targetReferenceValue);
			BindingProperties bindingProperties = this.bindingServiceProperties
					.getBindingProperties(inboundName);
			// Retrieve the StreamsConfig created for this method if available.
			// Otherwise, create the StreamsBuilderFactory and get the underlying
			// config.
			if (!this.methodStreamsBuilderFactoryBeanMap.containsKey(method)) {
				StreamsBuilderFactoryBean streamsBuilderFactoryBean = buildStreamsBuilderAndRetrieveConfig(method.getDeclaringClass().getSimpleName() + "-" + method.getName(),
						applicationContext,
						inboundName, null, customizer);
				this.methodStreamsBuilderFactoryBeanMap.put(method, streamsBuilderFactoryBean);
			}
			try {
				StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.methodStreamsBuilderFactoryBeanMap
						.get(method);
				StreamsBuilder streamsBuilder = streamsBuilderFactoryBean.getObject();
				final String applicationId = streamsBuilderFactoryBean.getStreamsConfiguration().getProperty(StreamsConfig.APPLICATION_ID_CONFIG);
				KafkaStreamsConsumerProperties extendedConsumerProperties = this.kafkaStreamsExtendedBindingProperties
						.getExtendedConsumerProperties(inboundName);
				extendedConsumerProperties.setApplicationId(applicationId);
				// get state store spec
				KafkaStreamsStateStoreProperties spec = buildStateStoreSpec(method);

				Serde<?> keySerde = this.keyValueSerdeResolver
						.getInboundKeySerde(extendedConsumerProperties, ResolvableType.forMethodParameter(methodParameter));
				LOG.info("Key Serde used for " + targetReferenceValue + ": " + keySerde.getClass().getName());

				// With native decoding disabled the binder receives raw bytes and
				// converts later, hence the ByteArray serde fallback.
				Serde<?> valueSerde = bindingServiceProperties.getConsumerProperties(inboundName).isUseNativeDecoding() ?
						getValueSerde(inboundName, extendedConsumerProperties, ResolvableType.forMethodParameter(methodParameter)) : Serdes.ByteArray();
				LOG.info("Value Serde used for " + targetReferenceValue + ": " + valueSerde.getClass().getName());

				Topology.AutoOffsetReset autoOffsetReset = getAutoOffsetReset(inboundName, extendedConsumerProperties);

				if (parameterType.isAssignableFrom(KStream.class)) {
					KStream<?, ?> stream = getkStream(inboundName, spec,
							bindingProperties, extendedConsumerProperties, streamsBuilder, keySerde, valueSerde,
							autoOffsetReset, parameterIndex == 0);
					KStreamBoundElementFactory.KStreamWrapper kStreamWrapper = (KStreamBoundElementFactory.KStreamWrapper) targetBean;
					// wrap the proxy created during the initial target type binding
					// with real object (KStream)
					kStreamWrapper.wrap((KStream<Object, Object>) stream);
					this.kafkaStreamsBindingInformationCatalogue.addKeySerde(stream, keySerde);
					// Re-register the wrapper's binding properties under the real stream.
					BindingProperties bindingProperties1 = this.kafkaStreamsBindingInformationCatalogue.getBindingProperties().get(kStreamWrapper);
					this.kafkaStreamsBindingInformationCatalogue.registerBindingProperties(stream, bindingProperties1);

					this.kafkaStreamsBindingInformationCatalogue
							.addStreamBuilderFactory(streamsBuilderFactoryBean);
					// Let the first supporting adapter convert the stream into the
					// declared parameter type.
					for (StreamListenerParameterAdapter streamListenerParameterAdapter : adapters) {
						if (streamListenerParameterAdapter.supports(stream.getClass(),
								methodParameter)) {
							arguments[parameterIndex] = streamListenerParameterAdapter
									.adapt(stream, methodParameter);
							break;
						}
					}
					if (arguments[parameterIndex] == null
							&& parameterType.isAssignableFrom(stream.getClass())) {
						// No adapter matched but the raw stream is directly assignable.
						arguments[parameterIndex] = stream;
					}
					Assert.notNull(arguments[parameterIndex],
							"Cannot convert argument " + parameterIndex + " of "
									+ method + "from " + stream.getClass() + " to "
									+ parameterType);
				}
				else {
					handleKTableGlobalKTableInputs(arguments, parameterIndex, inboundName, parameterType, targetBean, streamsBuilderFactoryBean,
							streamsBuilder, extendedConsumerProperties, keySerde, valueSerde, autoOffsetReset);
				}
			}
			catch (Exception ex) {
				throw new IllegalStateException(ex);
			}
		}
		else {
			throw new IllegalStateException(
					StreamListenerErrorMessages.INVALID_DECLARATIVE_METHOD_PARAMETERS);
		}
	}
	return arguments;
}
|
||||
|
||||
private StoreBuilder buildStateStore(KafkaStreamsStateStoreProperties spec) {
|
||||
try {
|
||||
|
||||
Serde<?> keySerde = this.keyValueSerdeResolver
|
||||
.getStateStoreKeySerde(spec.getKeySerdeString());
|
||||
Serde<?> valueSerde = this.keyValueSerdeResolver
|
||||
.getStateStoreValueSerde(spec.getValueSerdeString());
|
||||
StoreBuilder builder;
|
||||
switch (spec.getType()) {
|
||||
case KEYVALUE:
|
||||
builder = Stores.keyValueStoreBuilder(
|
||||
Stores.persistentKeyValueStore(spec.getName()), keySerde,
|
||||
valueSerde);
|
||||
break;
|
||||
case WINDOW:
|
||||
builder = Stores
|
||||
.windowStoreBuilder(
|
||||
Stores.persistentWindowStore(spec.getName(),
|
||||
spec.getRetention(), 3, spec.getLength(), false),
|
||||
keySerde, valueSerde);
|
||||
break;
|
||||
case SESSION:
|
||||
builder = Stores.sessionStoreBuilder(Stores.persistentSessionStore(
|
||||
spec.getName(), spec.getRetention()), keySerde, valueSerde);
|
||||
break;
|
||||
default:
|
||||
throw new UnsupportedOperationException(
|
||||
"state store type (" + spec.getType() + ") is not supported!");
|
||||
}
|
||||
if (spec.isCacheEnabled()) {
|
||||
builder = builder.withCachingEnabled();
|
||||
}
|
||||
if (spec.isLoggingDisabled()) {
|
||||
builder = builder.withLoggingDisabled();
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
catch (Exception ex) {
|
||||
LOG.error("failed to build state store exception : " + ex);
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
|
||||
private KStream<?, ?> getkStream(String inboundName,
|
||||
KafkaStreamsStateStoreProperties storeSpec,
|
||||
BindingProperties bindingProperties,
|
||||
KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, StreamsBuilder streamsBuilder,
|
||||
Serde<?> keySerde, Serde<?> valueSerde,
|
||||
Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
|
||||
if (storeSpec != null) {
|
||||
StoreBuilder storeBuilder = buildStateStore(storeSpec);
|
||||
streamsBuilder.addStateStore(storeBuilder);
|
||||
if (LOG.isInfoEnabled()) {
|
||||
LOG.info("state store " + storeBuilder.name() + " added to topology");
|
||||
}
|
||||
}
|
||||
return getKStream(inboundName, bindingProperties, kafkaStreamsConsumerProperties, streamsBuilder,
|
||||
keySerde, valueSerde, autoOffsetReset, firstBuild);
|
||||
}
|
||||
|
||||
private void validateStreamListenerMethod(StreamListener streamListener,
|
||||
Method method, String[] methodAnnotatedOutboundNames) {
|
||||
String methodAnnotatedInboundName = streamListener.value();
|
||||
if (methodAnnotatedOutboundNames != null) {
|
||||
for (String s : methodAnnotatedOutboundNames) {
|
||||
if (StringUtils.hasText(s)) {
|
||||
Assert.isTrue(isDeclarativeOutput(method, s),
|
||||
"Method must be declarative");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (StringUtils.hasText(methodAnnotatedInboundName)) {
|
||||
int methodArgumentsLength = method.getParameterTypes().length;
|
||||
|
||||
for (int parameterIndex = 0; parameterIndex < methodArgumentsLength; parameterIndex++) {
|
||||
MethodParameter methodParameter = MethodParameter.forExecutable(method,
|
||||
parameterIndex);
|
||||
Assert.isTrue(
|
||||
isDeclarativeInput(methodAnnotatedInboundName, methodParameter),
|
||||
"Method must be declarative");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private boolean isDeclarativeOutput(Method m, String targetBeanName) {
|
||||
boolean declarative;
|
||||
Class<?> returnType = m.getReturnType();
|
||||
if (returnType.isArray()) {
|
||||
Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
|
||||
declarative = this.streamListenerResultAdapters.stream()
|
||||
.anyMatch((slpa) -> slpa.supports(returnType.getComponentType(),
|
||||
targetBeanClass));
|
||||
return declarative;
|
||||
}
|
||||
Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
|
||||
declarative = this.streamListenerResultAdapters.stream()
|
||||
.anyMatch((slpa) -> slpa.supports(returnType, targetBeanClass));
|
||||
return declarative;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private boolean isDeclarativeInput(String targetBeanName,
|
||||
MethodParameter methodParameter) {
|
||||
if (!methodParameter.getParameterType().isAssignableFrom(Object.class)
|
||||
&& this.applicationContext.containsBean(targetBeanName)) {
|
||||
Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
|
||||
if (targetBeanClass != null) {
|
||||
boolean supports = KafkaStreamsBinderUtils.supportsKStream(methodParameter, targetBeanClass);
|
||||
if (!supports) {
|
||||
supports = KTable.class.isAssignableFrom(targetBeanClass)
|
||||
&& KTable.class.isAssignableFrom(methodParameter.getParameterType());
|
||||
if (!supports) {
|
||||
supports = GlobalKTable.class.isAssignableFrom(targetBeanClass)
|
||||
&& GlobalKTable.class.isAssignableFrom(methodParameter.getParameterType());
|
||||
}
|
||||
}
|
||||
return supports;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private static String[] getOutboundBindingTargetNames(Method method) {
|
||||
SendTo sendTo = AnnotationUtils.findAnnotation(method, SendTo.class);
|
||||
if (sendTo != null) {
|
||||
Assert.isTrue(!ObjectUtils.isEmpty(sendTo.value()),
|
||||
StreamListenerErrorMessages.ATLEAST_ONE_OUTPUT);
|
||||
Assert.isTrue(sendTo.value().length >= 1,
|
||||
"At least one outbound destination need to be provided.");
|
||||
return sendTo.value();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@SuppressWarnings({"unchecked"})
|
||||
private KafkaStreamsStateStoreProperties buildStateStoreSpec(Method method) {
|
||||
if (!this.registeredStoresPerMethod.containsKey(method)) {
|
||||
KafkaStreamsStateStore spec = AnnotationUtils.findAnnotation(method,
|
||||
KafkaStreamsStateStore.class);
|
||||
if (spec != null) {
|
||||
Assert.isTrue(!ObjectUtils.isEmpty(spec.name()), "name cannot be empty");
|
||||
Assert.isTrue(spec.name().length() >= 1, "name cannot be empty.");
|
||||
this.registeredStoresPerMethod.put(method, new ArrayList<>());
|
||||
this.registeredStoresPerMethod.get(method).add(spec.name());
|
||||
KafkaStreamsStateStoreProperties props = new KafkaStreamsStateStoreProperties();
|
||||
props.setName(spec.name());
|
||||
props.setType(spec.type());
|
||||
props.setLength(spec.lengthMs());
|
||||
props.setKeySerdeString(spec.keySerde());
|
||||
props.setRetention(spec.retentionMs());
|
||||
props.setValueSerdeString(spec.valueSerde());
|
||||
props.setCacheEnabled(spec.cache());
|
||||
props.setLoggingDisabled(!spec.logging());
|
||||
return props;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,435 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.common.utils.Utils;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.annotation.AnnotatedBeanDefinition;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.ApplicationContextAware;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.kafka.support.serializer.JsonSerde;
|
||||
import org.springframework.util.ClassUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
 * Resolver for key and value Serde.
 *
 * On the inbound, if native decoding is enabled, then any deserialization on the value is
 * handled by Kafka. First, we look for any key/value Serde set on the binding itself, if
 * that is not available then look at the common Serde set at the global level. If that
 * fails, it falls back to byte[]. If native decoding is disabled, then the binder will do
 * the deserialization on value and ignore any Serde set for value and rely on the
 * contentType provided. Keys are always deserialized at the broker.
 *
 * Same rules apply on the outbound. If native encoding is enabled, then value
 * serialization is done at the broker using any binder level Serde for value, if not
 * using common Serde, if not, then byte[]. If native encoding is disabled, then the
 * binder will do serialization using a contentType. Keys are always serialized by the
 * broker.
 *
 * For state store, use serdes class specified in
 * {@link org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsStateStore}
 * to create Serde accordingly.
 *
 * @author Soby Chacko
 * @author Lei Chen
 */
public class KeyValueSerdeResolver implements ApplicationContextAware {

	private static final Log LOG = LogFactory.getLog(KeyValueSerdeResolver.class);

	// Kafka Streams configuration applied to every resolved Serde via configure().
	private final Map<String, Object> streamConfigGlobalProperties;

	// Binder-level configuration; consulted for default.key.serde / default.value.serde.
	private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;

	// Set via setApplicationContext; used to look up user-defined Serde beans.
	private ConfigurableApplicationContext context;

	KeyValueSerdeResolver(Map<String, Object> streamConfigGlobalProperties,
			KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
		this.streamConfigGlobalProperties = streamConfigGlobalProperties;
		this.binderConfigurationProperties = binderConfigurationProperties;
	}

	/**
	 * Provide the {@link Serde} for inbound key.
	 * @param extendedConsumerProperties binding level extended
	 * {@link KafkaStreamsConsumerProperties}
	 * @return configured {@link Serde} for the inbound key.
	 */
	public Serde<?> getInboundKeySerde(
			KafkaStreamsConsumerProperties extendedConsumerProperties) {
		String keySerdeString = extendedConsumerProperties.getKeySerde();

		return getKeySerde(keySerdeString);
	}

	/**
	 * Provide the {@link Serde} for inbound key, using the target type's generics to
	 * infer a Serde when none is configured on the binding.
	 * @param extendedConsumerProperties binding level extended
	 * {@link KafkaStreamsConsumerProperties}
	 * @param resolvableType resolved type of the bound KStream/KTable/GlobalKTable
	 * @return configured {@link Serde} for the inbound key.
	 */
	public Serde<?> getInboundKeySerde(
			KafkaStreamsConsumerProperties extendedConsumerProperties, ResolvableType resolvableType) {
		String keySerdeString = extendedConsumerProperties.getKeySerde();

		return getKeySerde(keySerdeString, resolvableType);
	}

	/**
	 * Provide the {@link Serde} for inbound value.
	 * @param consumerProperties {@link ConsumerProperties} on binding
	 * @param extendedConsumerProperties binding level extended
	 * {@link KafkaStreamsConsumerProperties}
	 * @return configured {@link Serde} for the inbound value.
	 */
	public Serde<?> getInboundValueSerde(ConsumerProperties consumerProperties,
			KafkaStreamsConsumerProperties extendedConsumerProperties) {
		Serde<?> valueSerde;

		String valueSerdeString = extendedConsumerProperties.getValueSerde();
		try {
			// Only honor the configured Serde when native decoding is on; otherwise
			// the binder does the conversion and byte[] pass-through is used.
			if (consumerProperties != null && consumerProperties.isUseNativeDecoding()) {
				valueSerde = getValueSerde(valueSerdeString);
			}
			else {
				valueSerde = Serdes.ByteArray();
			}
			valueSerde.configure(this.streamConfigGlobalProperties, false);
		}
		catch (ClassNotFoundException ex) {
			throw new IllegalStateException("Serde class not found: ", ex);
		}
		return valueSerde;
	}

	/**
	 * Provide the {@link Serde} for inbound value, using the target type's generics to
	 * infer a Serde when none is configured on the binding.
	 * @param consumerProperties {@link ConsumerProperties} on binding
	 * @param extendedConsumerProperties binding level extended
	 * {@link KafkaStreamsConsumerProperties}
	 * @param resolvableType resolved type of the bound KStream/KTable/GlobalKTable
	 * @return configured {@link Serde} for the inbound value.
	 */
	public Serde<?> getInboundValueSerde(ConsumerProperties consumerProperties,
			KafkaStreamsConsumerProperties extendedConsumerProperties,
			ResolvableType resolvableType) {
		Serde<?> valueSerde;

		String valueSerdeString = extendedConsumerProperties.getValueSerde();
		try {
			if (consumerProperties != null && consumerProperties.isUseNativeDecoding()) {
				valueSerde = getValueSerde(valueSerdeString, resolvableType);
			}
			else {
				valueSerde = Serdes.ByteArray();
			}
			valueSerde.configure(this.streamConfigGlobalProperties, false);
		}
		catch (ClassNotFoundException ex) {
			throw new IllegalStateException("Serde class not found: ", ex);
		}
		return valueSerde;
	}

	/**
	 * Provide the {@link Serde} for outbound key.
	 * @param properties binding level extended {@link KafkaStreamsProducerProperties}
	 * @return configured {@link Serde} for the outbound key.
	 */
	public Serde<?> getOuboundKeySerde(KafkaStreamsProducerProperties properties) {
		return getKeySerde(properties.getKeySerde());
	}

	/**
	 * Provide the {@link Serde} for outbound key, using the target type's generics to
	 * infer a Serde when none is configured on the binding.
	 * @param properties binding level extended {@link KafkaStreamsProducerProperties}
	 * @param resolvableType resolved type of the bound outbound KStream
	 * @return configured {@link Serde} for the outbound key.
	 */
	public Serde<?> getOuboundKeySerde(KafkaStreamsProducerProperties properties, ResolvableType resolvableType) {
		return getKeySerde(properties.getKeySerde(), resolvableType);
	}


	/**
	 * Provide the {@link Serde} for outbound value.
	 * @param producerProperties {@link ProducerProperties} on binding
	 * @param kafkaStreamsProducerProperties binding level extended
	 * {@link KafkaStreamsProducerProperties}
	 * @return configured {@link Serde} for the outbound value.
	 */
	public Serde<?> getOutboundValueSerde(ProducerProperties producerProperties,
			KafkaStreamsProducerProperties kafkaStreamsProducerProperties) {
		Serde<?> valueSerde;
		try {
			// Mirror of the inbound rule: only use the configured Serde under native
			// encoding; otherwise the binder serializes and byte[] is used.
			if (producerProperties.isUseNativeEncoding()) {
				valueSerde = getValueSerde(
						kafkaStreamsProducerProperties.getValueSerde());
			}
			else {
				valueSerde = Serdes.ByteArray();
			}
			valueSerde.configure(this.streamConfigGlobalProperties, false);
		}
		catch (ClassNotFoundException ex) {
			throw new IllegalStateException("Serde class not found: ", ex);
		}
		return valueSerde;
	}

	/**
	 * Provide the {@link Serde} for outbound value, using the target type's generics
	 * to infer a Serde when none is configured on the binding.
	 * @param producerProperties {@link ProducerProperties} on binding
	 * @param kafkaStreamsProducerProperties binding level extended
	 * {@link KafkaStreamsProducerProperties}
	 * @param resolvableType resolved type of the bound outbound KStream
	 * @return configured {@link Serde} for the outbound value.
	 */
	public Serde<?> getOutboundValueSerde(ProducerProperties producerProperties,
			KafkaStreamsProducerProperties kafkaStreamsProducerProperties, ResolvableType resolvableType) {
		Serde<?> valueSerde;
		try {
			if (producerProperties.isUseNativeEncoding()) {
				valueSerde = getValueSerde(
						kafkaStreamsProducerProperties.getValueSerde(), resolvableType);
			}
			else {
				valueSerde = Serdes.ByteArray();
			}
			valueSerde.configure(this.streamConfigGlobalProperties, false);
		}
		catch (ClassNotFoundException ex) {
			throw new IllegalStateException("Serde class not found: ", ex);
		}
		return valueSerde;
	}

	/**
	 * Provide the {@link Serde} for state store.
	 * @param keySerdeString serde class used for key
	 * @return {@link Serde} for the state store key.
	 */
	public Serde<?> getStateStoreKeySerde(String keySerdeString) {
		return getKeySerde(keySerdeString);
	}

	/**
	 * Provide the {@link Serde} for state store value.
	 * @param valueSerdeString serde class used for value
	 * @return {@link Serde} for the state store value.
	 */
	public Serde<?> getStateStoreValueSerde(String valueSerdeString) {
		try {
			return getValueSerde(valueSerdeString);
		}
		catch (ClassNotFoundException ex) {
			throw new IllegalStateException("Serde class not found: ", ex);
		}
	}

	// Instantiate the configured key Serde class, or fall back to the binder-level
	// default.key.serde (byte[] when that too is absent). The Serde is configured
	// with the global stream properties before being returned.
	private Serde<?> getKeySerde(String keySerdeString) {
		Serde<?> keySerde;
		try {
			if (StringUtils.hasText(keySerdeString)) {
				keySerde = Utils.newInstance(keySerdeString, Serde.class);
			}
			else {
				keySerde = getFallbackSerde("default.key.serde");
			}
			keySerde.configure(this.streamConfigGlobalProperties, true);

		}
		catch (ClassNotFoundException ex) {
			throw new IllegalStateException("Serde class not found: ", ex);
		}
		return keySerde;
	}

	// Variant that, absent an explicit Serde, tries to infer one from the first
	// generic (the key type) of the bound KStream/KTable/GlobalKTable type.
	private Serde<?> getKeySerde(String keySerdeString, ResolvableType resolvableType) {
		Serde<?> keySerde = null;
		try {
			if (StringUtils.hasText(keySerdeString)) {
				keySerde = Utils.newInstance(keySerdeString, Serde.class);
			}
			else {
				if (resolvableType != null &&
						(isResolvalbeKafkaStreamsType(resolvableType) || isResolvableKStreamArrayType(resolvableType))) {
					// Generic 0 is the key type; for KStream[] use the component type.
					ResolvableType generic = resolvableType.isArray() ? resolvableType.getComponentType().getGeneric(0) : resolvableType.getGeneric(0);
					Serde<?> fallbackSerde = getFallbackSerde("default.key.serde");
					keySerde = getSerde(generic, fallbackSerde);
				}
				if (keySerde == null) {
					keySerde = Serdes.ByteArray();
				}
			}
			keySerde.configure(this.streamConfigGlobalProperties, true);
		}
		catch (ClassNotFoundException ex) {
			throw new IllegalStateException("Serde class not found: ", ex);
		}
		return keySerde;
	}

	// True for KStream[] (branching) bindings.
	private boolean isResolvableKStreamArrayType(ResolvableType resolvableType) {
		return resolvableType.isArray() &&
				KStream.class.isAssignableFrom(resolvableType.getComponentType().getRawClass());
	}

	// True for KStream/KTable/GlobalKTable bindings. (Name typo preserved: public
	// behavior unchanged; it is a private helper.)
	private boolean isResolvalbeKafkaStreamsType(ResolvableType resolvableType) {
		return resolvableType.getRawClass() != null && (KStream.class.isAssignableFrom(resolvableType.getRawClass()) || KTable.class.isAssignableFrom(resolvableType.getRawClass()) ||
				GlobalKTable.class.isAssignableFrom(resolvableType.getRawClass()));
	}

	// Infer a Serde for the given generic type. Resolution order:
	// 1. a user-defined Serde<T> bean whose generic matches the target type,
	// 2. the standard Serdes for well-known JDK types,
	// 3. a non-standard user-set default Serde,
	// 4. JsonSerde for other concrete types (but not plain Object).
	// Returns null when nothing matches, letting callers apply their own fallback.
	private Serde<?> getSerde(ResolvableType generic, Serde<?> fallbackSerde) {
		Serde<?> serde = null;

		Map<String, Serde> beansOfType = context.getBeansOfType(Serde.class);
		// Single-element array so the lambda below can write the match.
		Serde<?>[] serdeBeans = new Serde<?>[1];

		final Class<?> genericRawClazz = generic.getRawClass();
		beansOfType.forEach((k, v) -> {
			// Locate the @Bean method with the same name as the bean to recover the
			// Serde's generic type (erased on the instance itself).
			final Class<?> classObj = ClassUtils.resolveClassName(((AnnotatedBeanDefinition)
							context.getBeanFactory().getBeanDefinition(k))
							.getMetadata().getClassName(),
					ClassUtils.getDefaultClassLoader());
			try {
				Method[] methods = classObj.getMethods();
				Optional<Method> serdeBeanMethod = Arrays.stream(methods).filter(m -> m.getName().equals(k)).findFirst();
				if (serdeBeanMethod.isPresent()) {
					Method method = serdeBeanMethod.get();
					ResolvableType resolvableType = ResolvableType.forMethodReturnType(method, classObj);
					ResolvableType serdeBeanGeneric = resolvableType.getGeneric(0);
					Class<?> serdeGenericRawClazz = serdeBeanGeneric.getRawClass();
					if (serdeGenericRawClazz != null && genericRawClazz != null) {
						if (serdeGenericRawClazz.isAssignableFrom(genericRawClazz)) {
							serdeBeans[0] = v;
						}
					}
				}
			}
			catch (Exception e) {
				// Best-effort bean inspection: a bean we cannot analyze is simply
				// skipped rather than failing serde resolution.
			}

		});

		if (serdeBeans[0] != null) {
			return serdeBeans[0];
		}

		if (genericRawClazz != null) {
			if (Integer.class.isAssignableFrom(genericRawClazz)) {
				serde = Serdes.Integer();
			}
			else if (Long.class.isAssignableFrom(genericRawClazz)) {
				serde = Serdes.Long();
			}
			else if (Short.class.isAssignableFrom(genericRawClazz)) {
				serde = Serdes.Short();
			}
			else if (Double.class.isAssignableFrom(genericRawClazz)) {
				serde = Serdes.Double();
			}
			else if (Float.class.isAssignableFrom(genericRawClazz)) {
				serde = Serdes.Float();
			}
			else if (byte[].class.isAssignableFrom(genericRawClazz)) {
				serde = Serdes.ByteArray();
			}
			else if (String.class.isAssignableFrom(genericRawClazz)) {
				serde = Serdes.String();
			}
			else if (UUID.class.isAssignableFrom(genericRawClazz)) {
				serde = Serdes.UUID();
			}
			else if (!isSerdeFromStandardDefaults(fallbackSerde)) {
				//User purposely set a default serde that is not one of the above
				serde = fallbackSerde;
			}
			else {
				// If the type is Object, then skip assigning the JsonSerde and let the fallback mechanism takes precedence.
				if (!genericRawClazz.isAssignableFrom((Object.class))) {
					serde = new JsonSerde(genericRawClazz);
				}
			}
		}
		return serde;
	}

	// Whether the given Serde is one of the standard Serdes defaults.
	private boolean isSerdeFromStandardDefaults(Serde<?> serde) {
		if (serde != null) {
			// NOTE(review): a Serde implementation never extends Number, so this
			// branch appears unreachable; the intent was presumably to detect the
			// numeric Serdes (Integer/Long/Short/Double/Float) — confirm and fix.
			if (Number.class.isAssignableFrom(serde.getClass())) {
				return true;
			}
			else if (Serdes.ByteArray().getClass().isAssignableFrom(serde.getClass())) {
				return true;
			}
			else if (Serdes.String().getClass().isAssignableFrom(serde.getClass())) {
				return true;
			}
			else if (Serdes.UUID().getClass().isAssignableFrom(serde.getClass())) {
				return true;
			}
		}
		return false;
	}


	// Instantiate the configured value Serde class, or fall back to the binder-level
	// default.value.serde (byte[] when absent).
	private Serde<?> getValueSerde(String valueSerdeString)
			throws ClassNotFoundException {
		Serde<?> valueSerde;
		if (StringUtils.hasText(valueSerdeString)) {
			valueSerde = Utils.newInstance(valueSerdeString, Serde.class);
		}
		else {
			valueSerde = getFallbackSerde("default.value.serde");
		}
		return valueSerde;
	}

	// Instantiate the Serde named by the given binder-configuration key, or byte[]
	// when the key is not configured.
	private Serde<?> getFallbackSerde(String s) throws ClassNotFoundException {
		return this.binderConfigurationProperties.getConfiguration()
				.containsKey(s)
						? Utils.newInstance(this.binderConfigurationProperties
								.getConfiguration().get(s),
								Serde.class)
						: Serdes.ByteArray();
	}

	// Variant that, absent an explicit Serde, tries to infer one from the second
	// generic (the value type) of the bound KStream/KTable/GlobalKTable type.
	@SuppressWarnings("unchecked")
	private Serde<?> getValueSerde(String valueSerdeString, ResolvableType resolvableType)
			throws ClassNotFoundException {
		Serde<?> valueSerde = null;
		if (StringUtils.hasText(valueSerdeString)) {
			valueSerde = Utils.newInstance(valueSerdeString, Serde.class);
		}
		else {

			if (resolvableType != null && ((isResolvalbeKafkaStreamsType(resolvableType)) ||
					(isResolvableKStreamArrayType(resolvableType)))) {
				Serde<?> fallbackSerde = getFallbackSerde("default.value.serde");
				// Generic 1 is the value type; for KStream[] use the component type.
				ResolvableType generic = resolvableType.isArray() ? resolvableType.getComponentType().getGeneric(1) : resolvableType.getGeneric(1);
				valueSerde = getSerde(generic, fallbackSerde);
			}
			if (valueSerde == null) {

				valueSerde = Serdes.ByteArray();
			}
		}
		return valueSerde;
	}

	@Override
	public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
		context = (ConfigurableApplicationContext) applicationContext;
	}
}
|
||||
@@ -1,63 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
|
||||
import org.springframework.kafka.listener.ConsumerRecordRecoverer;
|
||||
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
|
||||
|
||||
/**
|
||||
* Custom implementation for {@link ConsumerRecordRecoverer} that keeps a collection of
|
||||
* recoverer objects per input topics. These topics might be per input binding or multiplexed
|
||||
* topics in a single binding.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 2.0.0
|
||||
*/
|
||||
public class SendToDlqAndContinue implements ConsumerRecordRecoverer {
|
||||
|
||||
/**
|
||||
* DLQ dispatcher per topic in the application context. The key here is not the actual
|
||||
* DLQ topic but the incoming topic that caused the error.
|
||||
*/
|
||||
private Map<String, DeadLetterPublishingRecoverer> dlqDispatchers = new HashMap<>();
|
||||
|
||||
/**
|
||||
* For a given topic, send the key/value record to DLQ topic.
|
||||
*
|
||||
* @param consumerRecord consumer record
|
||||
* @param exception exception
|
||||
*/
|
||||
public void sendToDlq(ConsumerRecord<?, ?> consumerRecord, Exception exception) {
|
||||
DeadLetterPublishingRecoverer kafkaStreamsDlqDispatch = this.dlqDispatchers.get(consumerRecord.topic());
|
||||
kafkaStreamsDlqDispatch.accept(consumerRecord, exception);
|
||||
}
|
||||
|
||||
void addKStreamDlqDispatch(String topic,
|
||||
DeadLetterPublishingRecoverer kafkaStreamsDlqDispatch) {
|
||||
this.dlqDispatchers.put(topic, kafkaStreamsDlqDispatch);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(ConsumerRecord<?, ?> consumerRecord, Exception e) {
|
||||
this.dlqDispatchers.get(consumerRecord.topic()).accept(consumerRecord, e);
|
||||
}
|
||||
}
|
||||
@@ -1,117 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
import org.springframework.context.SmartLifecycle;
|
||||
import org.springframework.kafka.KafkaException;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
|
||||
/**
|
||||
* Iterate through all {@link StreamsBuilderFactoryBean} in the application context and
|
||||
* start them. As each one completes starting, register the associated KafkaStreams object
|
||||
* into {@link InteractiveQueryService}.
|
||||
*
|
||||
* This {@link SmartLifecycle} class ensures that the bean created from it is started very
|
||||
* late through the bootstrap process by setting the phase value closer to
|
||||
* Integer.MAX_VALUE. This is to guarantee that the {@link StreamsBuilderFactoryBean} on a
|
||||
* {@link org.springframework.cloud.stream.annotation.StreamListener} method with multiple
|
||||
* bindings is only started after all the binding phases have completed successfully.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class StreamsBuilderFactoryManager implements SmartLifecycle {
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
private final KafkaStreamsBinderMetrics kafkaStreamsBinderMetrics;
|
||||
|
||||
private volatile boolean running;
|
||||
|
||||
StreamsBuilderFactoryManager(KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry, KafkaStreamsBinderMetrics kafkaStreamsBinderMetrics) {
|
||||
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
this.kafkaStreamsRegistry = kafkaStreamsRegistry;
|
||||
this.kafkaStreamsBinderMetrics = kafkaStreamsBinderMetrics;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAutoStartup() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stop(Runnable callback) {
|
||||
stop();
|
||||
if (callback != null) {
|
||||
callback.run();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void start() {
|
||||
if (!this.running) {
|
||||
try {
|
||||
Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = this.kafkaStreamsBindingInformationCatalogue
|
||||
.getStreamsBuilderFactoryBeans();
|
||||
for (StreamsBuilderFactoryBean streamsBuilderFactoryBean : streamsBuilderFactoryBeans) {
|
||||
streamsBuilderFactoryBean.start();
|
||||
this.kafkaStreamsRegistry.registerKafkaStreams(streamsBuilderFactoryBean);
|
||||
}
|
||||
if (this.kafkaStreamsBinderMetrics != null) {
|
||||
this.kafkaStreamsBinderMetrics.addMetrics(streamsBuilderFactoryBeans);
|
||||
}
|
||||
this.running = true;
|
||||
}
|
||||
catch (Exception ex) {
|
||||
throw new KafkaException("Could not start stream: ", ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void stop() {
|
||||
if (this.running) {
|
||||
try {
|
||||
Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = this.kafkaStreamsBindingInformationCatalogue
|
||||
.getStreamsBuilderFactoryBeans();
|
||||
for (StreamsBuilderFactoryBean streamsBuilderFactoryBean : streamsBuilderFactoryBeans) {
|
||||
streamsBuilderFactoryBean.stop();
|
||||
}
|
||||
}
|
||||
catch (Exception ex) {
|
||||
throw new IllegalStateException(ex);
|
||||
}
|
||||
finally {
|
||||
this.running = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized boolean isRunning() {
|
||||
return this.running;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getPhase() {
|
||||
return Integer.MAX_VALUE - 100;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,90 +0,0 @@
|
||||
/*
|
||||
* Copyright 2017-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.annotations;
|
||||
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.Output;
|
||||
|
||||
/**
 * Bindable interface for {@link KStream} input and output.
 *
 * This interface can be used as a bindable interface with
 * {@link org.springframework.cloud.stream.annotation.EnableBinding} when both input and
 * output types are a single KStream. In other scenarios where multiple types are required,
 * other similar bindable interfaces can be created and used. For example, there are cases
 * in which multiple KStreams are required on the outbound in the case of KStream
 * branching, or multiple input types are required either in the form of multiple KStreams
 * or a combination of KStreams and KTables. In those cases, new bindable interfaces
 * compatible with the requirements must be created. Here are some examples.
 *
 * <pre class="code">
 * interface KStreamBranchProcessor {
 *     @Input("input")
 *     KStream<?, ?> input();
 *
 *     @Output("output-1")
 *     KStream<?, ?> output1();
 *
 *     @Output("output-2")
 *     KStream<?, ?> output2();
 *
 *     @Output("output-3")
 *     KStream<?, ?> output3();
 *
 *     ......
 *
 * }
 *</pre>
 *
 * <pre class="code">
 * interface KStreamKtableProcessor {
 *     @Input("input-1")
 *     KStream<?, ?> input1();
 *
 *     @Input("input-2")
 *     KTable<?, ?> input2();
 *
 *     @Output("output")
 *     KStream<?, ?> output();
 *
 *     ......
 *
 * }
 *</pre>
 *
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public interface KafkaStreamsProcessor {

	/**
	 * Input binding, bound under the name {@code "input"}.
	 * @return {@link Input} binding for {@link KStream} type.
	 */
	@Input("input")
	KStream<?, ?> input();

	/**
	 * Output binding, bound under the name {@code "output"}.
	 * @return {@link Output} binding for {@link KStream} type.
	 */
	@Output("output")
	KStream<?, ?> output();

}
|
||||
@@ -1,115 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.annotations;
|
||||
|
||||
import java.lang.annotation.ElementType;
|
||||
import java.lang.annotation.Retention;
|
||||
import java.lang.annotation.RetentionPolicy;
|
||||
import java.lang.annotation.Target;
|
||||
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsStateStoreProperties;
|
||||
|
||||
/**
 * Annotation for declaring a Kafka Streams state store.
 *
 * This annotation can be used to inject a state store specification into the KStream
 * building process so that the desired store can be built by the StreamsBuilder and
 * added to the topology for later use by processors. This is particularly useful when
 * combining the streams DSL with the low-level processor API. In those cases, if a
 * writable state store is desired in processors, it needs to be created using this
 * annotation. Here is an example.
 *
 * <pre class="code">
 * @StreamListener("input")
 * @KafkaStreamsStateStore(name="mystate", type= KafkaStreamsStateStoreProperties.StoreType.WINDOW,
 * size=300000)
 * public void process(KStream<Object, Product> input) {
 *     ......
 * }
 * </pre>
 *
 * With that, you should be able to read/write this state store in your
 * processor/transformer code.
 *
 * <pre class="code">
 * new Processor<Object, Product>() {
 *     WindowStore<Object, String> state;
 *     @Override
 *     public void init(ProcessorContext processorContext) {
 *         state = (WindowStore)processorContext.getStateStore("mystate");
 *         ......
 *     }
 * }
 * </pre>
 *
 * @author Lei Chen
 */

@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.ANNOTATION_TYPE })
@Retention(RetentionPolicy.RUNTIME)

public @interface KafkaStreamsStateStore {

	/**
	 * Provides name of the state store.
	 * @return name of state store.
	 */
	String name() default "";

	/**
	 * State store type (key/value, window, or session).
	 * @return {@link KafkaStreamsStateStoreProperties.StoreType} of state store.
	 */
	KafkaStreamsStateStoreProperties.StoreType type() default KafkaStreamsStateStoreProperties.StoreType.KEYVALUE;

	/**
	 * Serde used for the key; given as a fully qualified class name.
	 * @return key serde of state store.
	 */
	String keySerde() default "org.apache.kafka.common.serialization.Serdes$StringSerde";

	/**
	 * Serde used for the value; given as a fully qualified class name.
	 * @return value serde of state store.
	 */
	String valueSerde() default "org.apache.kafka.common.serialization.Serdes$StringSerde";

	/**
	 * Length in milliseconds of a windowed store's window.
	 * @return length in milliseconds of window (for windowed store).
	 */
	long lengthMs() default 0;

	/**
	 * Retention period for windowed store windows.
	 * @return the maximum period of time in milliseconds to keep each window in this
	 * store (for windowed store).
	 */
	long retentionMs() default 0;

	/**
	 * Whether caching is enabled or not.
	 * @return whether caching should be enabled on the created store.
	 */
	boolean cache() default false;

	/**
	 * Whether logging is enabled or not.
	 * @return whether logging (changelog topic) should be enabled on the created store.
	 */
	boolean logging() default true;

}
|
||||
@@ -1,111 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.function;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.beans.factory.BeanFactoryUtils;
|
||||
import org.springframework.beans.factory.annotation.AnnotatedBeanDefinition;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionOutcome;
|
||||
import org.springframework.boot.autoconfigure.condition.SpringBootCondition;
|
||||
import org.springframework.context.annotation.ConditionContext;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.core.type.AnnotatedTypeMetadata;
|
||||
import org.springframework.util.ClassUtils;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
/**
 * Custom {@link org.springframework.context.annotation.Condition} that detects the presence
 * of java.util.function Function/Consumer/BiFunction/BiConsumer beans whose generic
 * signatures involve Kafka Streams types. Used for Kafka Streams function support.
 *
 * @author Soby Chacko
 * @since 2.2.0
 */
public class FunctionDetectorCondition extends SpringBootCondition {

	private static final Log LOG = LogFactory.getLog(FunctionDetectorCondition.class);

	@SuppressWarnings({ "unchecked", "rawtypes" })
	@Override
	public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) {
		if (context != null && context.getBeanFactory() != null) {

			// Collect the names of every functional-style bean visible in this
			// factory and its ancestors (non-eager, includes non-singletons=false).
			String[] functionTypes = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(context.getBeanFactory(), Function.class, true, false);
			String[] consumerTypes = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(context.getBeanFactory(), Consumer.class, true, false);
			String[] biFunctionTypes = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(context.getBeanFactory(), BiFunction.class, true, false);
			String[] biConsumerTypes = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(context.getBeanFactory(), BiConsumer.class, true, false);

			List<String> functionComponents = new ArrayList<>();

			functionComponents.addAll(Arrays.asList(functionTypes));
			functionComponents.addAll(Arrays.asList(consumerTypes));
			functionComponents.addAll(Arrays.asList(biFunctionTypes));
			functionComponents.addAll(Arrays.asList(biConsumerTypes));

			// Keep only those beans whose declared generics are Kafka Streams types.
			List<String> kafkaStreamsFunctions = pruneFunctionBeansForKafkaStreams(functionComponents, context);
			if (!CollectionUtils.isEmpty(kafkaStreamsFunctions)) {
				return ConditionOutcome.match("Matched. Function/BiFunction/Consumer beans found");
			}
			else {
				return ConditionOutcome.noMatch("No match. No Function/BiFunction/Consumer beans found");
			}
		}
		// No bean factory available: cannot detect anything, so report no match.
		return ConditionOutcome.noMatch("No match. No Function/BiFunction/Consumer beans found");
	}

	/**
	 * Filters the given bean names down to those whose functional signature takes a
	 * Kafka Streams type (KStream, KTable or GlobalKTable) as its first generic.
	 * <p>
	 * NOTE(review): this locates the {@code @Bean} factory method by looking for a
	 * method whose name equals the bean name — i.e. it assumes the default bean
	 * naming convention (bean name == factory method name). Beans registered under
	 * a custom name would be silently skipped; confirm this is intentional.
	 *
	 * @param strings candidate bean names (Function/Consumer/BiFunction/BiConsumer beans)
	 * @param context the condition context providing access to the bean factory
	 * @return the subset of bean names relevant to Kafka Streams
	 */
	private static List<String> pruneFunctionBeansForKafkaStreams(List<String> strings,
			ConditionContext context) {
		final List<String> prunedList = new ArrayList<>();

		for (String key : strings) {
			// Resolve the configuration class that declares this bean.
			final Class<?> classObj = ClassUtils.resolveClassName(((AnnotatedBeanDefinition)
					context.getBeanFactory().getBeanDefinition(key))
					.getMetadata().getClassName(),
					ClassUtils.getDefaultClassLoader());
			try {
				Method[] methods = classObj.getMethods();
				Optional<Method> kafkaStreamMethod = Arrays.stream(methods).filter(m -> m.getName().equals(key)).findFirst();
				if (kafkaStreamMethod.isPresent()) {
					Method method = kafkaStreamMethod.get();
					ResolvableType resolvableType = ResolvableType.forMethodReturnType(method, classObj);
					// First generic of the functional type is its input type.
					final Class<?> rawClass = resolvableType.getGeneric(0).getRawClass();
					if (rawClass == KStream.class || rawClass == KTable.class || rawClass == GlobalKTable.class) {
						prunedList.add(key);
					}
				}
			}
			catch (Exception e) {
				// Best-effort detection: log and continue with the remaining beans.
				LOG.error("Function not found: " + key, e);
			}
		}
		return prunedList;
	}
}
|
||||
@@ -1,236 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.function;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.BeanFactory;
|
||||
import org.springframework.beans.factory.BeanFactoryAware;
|
||||
import org.springframework.beans.factory.InitializingBean;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
|
||||
import org.springframework.beans.factory.support.RootBeanDefinition;
|
||||
import org.springframework.cloud.stream.binding.AbstractBindableProxyFactory;
|
||||
import org.springframework.cloud.stream.binding.BoundTargetHolder;
|
||||
import org.springframework.cloud.stream.function.FunctionConstants;
|
||||
import org.springframework.cloud.stream.function.StreamFunctionProperties;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
/**
 * Kafka Streams specific target bindings proxy factory. See {@link AbstractBindableProxyFactory} for more details.
 *
 * Targets bound by this factory:
 *
 * {@link KStream}
 * {@link KTable}
 * {@link GlobalKTable}
 *
 * This class looks at the Function bean's return signature as {@link ResolvableType} and introspects the
 * individual types, binding them on the way.
 *
 * All types on the {@link ResolvableType} are bound except for KStream[] array types on the outbound, which will be
 * deferred for binding at a later stage. The reason for doing that is because in this class, we don't have any way to know
 * the actual size in the returned array. That has to wait until the function is invoked and we get a result.
 *
 * @author Soby Chacko
 * @since 3.0.0
 */
public class KafkaStreamsBindableProxyFactory extends AbstractBindableProxyFactory implements InitializingBean, BeanFactoryAware {

	@Autowired
	private StreamFunctionProperties streamFunctionProperties;

	// Full resolvable type of the functional bean, e.g. Function<KStream<K, V>, KStream<K, V>>.
	private final ResolvableType type;

	// Name of the functional bean; used to derive default binding names.
	private final String functionName;

	private BeanFactory beanFactory;

	public KafkaStreamsBindableProxyFactory(ResolvableType type, String functionName) {
		// NOTE(review): type.getType().getClass() is the runtime class of the Type
		// object itself, not of the functional interface — confirm this is the
		// intended value to pass to the superclass.
		super(type.getType().getClass());
		this.type = type;
		this.functionName = functionName;
	}

	@Override
	public void afterPropertiesSet() {
		Assert.notEmpty(KafkaStreamsBindableProxyFactory.this.bindingTargetFactories,
				"'bindingTargetFactories' cannot be empty");

		// Walk the generics of the functional type in order: first the input(s),
		// then (possibly through a curried chain) the output.
		int resolvableTypeDepthCounter = 0;
		ResolvableType argument = this.type.getGeneric(resolvableTypeDepthCounter++);
		List<String> inputBindings = buildInputBindings();
		Iterator<String> iterator = inputBindings.iterator();
		String next = iterator.next();
		bindInput(argument, next);

		// BiFunction/BiConsumer carry a second input as their second generic.
		if (this.type.getRawClass() != null &&
				(this.type.getRawClass().isAssignableFrom(BiFunction.class) ||
						this.type.getRawClass().isAssignableFrom(BiConsumer.class))) {
			argument = this.type.getGeneric(resolvableTypeDepthCounter++);
			next = iterator.next();
			bindInput(argument, next);
		}
		ResolvableType outboundArgument = this.type.getGeneric(resolvableTypeDepthCounter);

		while (isAnotherFunctionOrConsumerFound(outboundArgument)) {
			//The function is a curried function. We should introspect the partial function chain hierarchy.
			argument = outboundArgument.getGeneric(0);
			String next1 = iterator.next();
			bindInput(argument, next1);
			outboundArgument = outboundArgument.getGeneric(1);
		}

		//Introspect output for binding.
		if (outboundArgument != null && outboundArgument.getRawClass() != null && (!outboundArgument.isArray() &&
				outboundArgument.getRawClass().isAssignableFrom(KStream.class))) {
			// if the type is array, we need to do a late binding as we don't know the number of
			// output bindings at this point in the flow.

			// Application-provided output binding names take precedence over the
			// generated default ("<functionName>-<suffix>-0").
			List<String> outputBindings = streamFunctionProperties.getOutputBindings(this.functionName);
			String outputBinding = null;

			if (!CollectionUtils.isEmpty(outputBindings)) {
				Iterator<String> outputBindingsIter = outputBindings.iterator();
				if (outputBindingsIter.hasNext()) {
					outputBinding = outputBindingsIter.next();
				}

			}
			else {
				outputBinding = String.format("%s-%s-0", this.functionName, FunctionConstants.DEFAULT_OUTPUT_SUFFIX);
			}
			Assert.isTrue(outputBinding != null, "output binding is not inferred.");
			KafkaStreamsBindableProxyFactory.this.outputHolders.put(outputBinding,
					new BoundTargetHolder(getBindingTargetFactory(KStream.class)
							.createOutput(outputBinding), true));
			// Register the bound target as a bean so it can be injected elsewhere.
			String outputBinding1 = outputBinding;
			RootBeanDefinition rootBeanDefinition1 = new RootBeanDefinition();
			rootBeanDefinition1.setInstanceSupplier(() -> outputHolders.get(outputBinding1).getBoundTarget());
			BeanDefinitionRegistry registry = (BeanDefinitionRegistry) beanFactory;
			registry.registerBeanDefinition(outputBinding1, rootBeanDefinition1);
		}
	}

	/**
	 * Returns true when the given type is itself a Function/Consumer (i.e. the
	 * enclosing function is curried and there are more inputs to unwrap).
	 */
	private boolean isAnotherFunctionOrConsumerFound(ResolvableType arg1) {
		return arg1 != null && !arg1.isArray() && arg1.getRawClass() != null &&
				(arg1.getRawClass().isAssignableFrom(Function.class) || arg1.getRawClass().isAssignableFrom(Consumer.class));
	}

	/**
	 * If the application provides the property spring.cloud.stream.function.inputBindings.functionName,
	 * that gets precedence. Otherwise, use functionName-input or functionName-input-0, functionName-input-1 and so on
	 * for multiple inputs.
	 *
	 * @return an ordered collection of input bindings to use
	 */
	private List<String> buildInputBindings() {
		List<String> inputs = new ArrayList<>();
		List<String> inputBindings = streamFunctionProperties.getInputBindings(this.functionName);
		if (!CollectionUtils.isEmpty(inputBindings)) {
			inputs.addAll(inputBindings);
			return inputs;
		}
		// BiFunction/BiConsumer always have exactly two inputs; otherwise count
		// inputs by unwrapping any curried function chain.
		int numberOfInputs = this.type.getRawClass() != null &&
				(this.type.getRawClass().isAssignableFrom(BiFunction.class) ||
						this.type.getRawClass().isAssignableFrom(BiConsumer.class)) ? 2 : getNumberOfInputs();
		int i = 0;
		while (i < numberOfInputs) {
			inputs.add(String.format("%s-%s-%d", this.functionName, FunctionConstants.DEFAULT_INPUT_SUFFIX, i++));
		}
		return inputs;

	}

	/**
	 * Counts the inputs of the functional type by walking a (possibly curried)
	 * chain of Function/Consumer generics. A plain Function has one input.
	 */
	private int getNumberOfInputs() {
		int numberOfInputs = 1;
		ResolvableType arg1 = this.type.getGeneric(1);

		while (isAnotherFunctionOrConsumerFound(arg1)) {
			arg1 = arg1.getGeneric(1);
			numberOfInputs++;
		}
		return numberOfInputs;

	}

	/**
	 * Creates the bound input target for the given resolvable type and registers
	 * it as a bean under the binding name so it can be injected elsewhere.
	 */
	private void bindInput(ResolvableType arg0, String inputName) {
		if (arg0.getRawClass() != null) {
			KafkaStreamsBindableProxyFactory.this.inputHolders.put(inputName,
					new BoundTargetHolder(getBindingTargetFactory(arg0.getRawClass())
							.createInput(inputName), true));
		}

		BeanDefinitionRegistry registry = (BeanDefinitionRegistry) beanFactory;

		RootBeanDefinition rootBeanDefinition = new RootBeanDefinition();
		rootBeanDefinition.setInstanceSupplier(() -> inputHolders.get(inputName).getBoundTarget());
		registry.registerBeanDefinition(inputName, rootBeanDefinition);

	}

	@Override
	public Set<String> getInputs() {
		// LinkedHashSet preserves binding registration order.
		Set<String> ins = new LinkedHashSet<>();
		this.inputHolders.forEach((s, BoundTargetHolder) -> ins.add(s));
		return ins;
	}

	@Override
	public Set<String> getOutputs() {
		// LinkedHashSet preserves binding registration order.
		Set<String> outs = new LinkedHashSet<>();
		this.outputHolders.forEach((s, BoundTargetHolder) -> outs.add(s));
		return outs;
	}

	@Override
	public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
		this.beanFactory = beanFactory;
	}

	/**
	 * Registers an additional output binding after construction; used for the
	 * deferred (late) binding of KStream[] array outputs.
	 */
	public void addOutputBinding(String output, Class<?> clazz) {
		KafkaStreamsBindableProxyFactory.this.outputHolders.put(output,
				new BoundTargetHolder(getBindingTargetFactory(clazz)
						.createOutput(output), true));
	}

	public String getFunctionName() {
		return functionName;
	}

	public Map<String, BoundTargetHolder> getOutputHolders() {
		return outputHolders;
	}
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user